Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c       |  49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h  | 128
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c        |  25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c     | 370
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c        | 410
5 files changed, 597 insertions(+), 385 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 78cf9a7f3eac..001be406a3d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -487,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
@@ -500,22 +501,36 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
+ {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
@@ -608,7 +623,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
- const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
int ret;
@@ -637,11 +651,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (iwl_trans->cfg->rf_id) {
- if (cfg == &iwl9260_2ac_cfg)
- cfg_9260lc = &iwl9260lc_2ac_cfg;
- if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
- cfg = cfg_9260lc;
- iwl_trans->cfg = cfg_9260lc;
+ if (cfg == &iwl9460_2ac_cfg &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+ cfg = &iwl9000lc_2ac_cfg;
+ iwl_trans->cfg = cfg;
}
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 11e347dd44c7..cac6d99012b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -37,6 +37,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <linux/cpu.h>
#include "iwl-fh.h"
#include "iwl-csr.h"
@@ -49,7 +50,7 @@
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
-#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
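For illustration, here is a minimal user-space sketch of what this macro change buys: the frag budget now follows the TFD layout in use instead of a compile-time constant. The TB counts below are assumptions (20 for the legacy TFD, 25 for the new TFH TFD, per the iwl-fh.h definitions of this era), and the struct is a stand-in, not the real iwl_trans_pcie:

#include <stdio.h>

/* assumed values, see iwl-fh.h */
#define IWL_NUM_OF_TBS   20  /* legacy struct iwl_tfd */
#define IWL_TFH_NUM_TBS  25  /* new struct iwl_tfh_tfd (assumed) */

struct demo_trans_pcie { unsigned char max_tbs; };

/* the reworked macro: 3 TBs stay reserved, per the comment above */
#define IWL_PCIE_MAX_FRAGS(x) ((x)->max_tbs - 3)

int main(void)
{
	struct demo_trans_pcie legacy = { .max_tbs = IWL_NUM_OF_TBS };
	struct demo_trans_pcie tfh = { .max_tbs = IWL_TFH_NUM_TBS };

	printf("legacy TFD: %d frags\n", IWL_PCIE_MAX_FRAGS(&legacy)); /* 17 */
	printf("TFH TFD:    %d frags\n", IWL_PCIE_MAX_FRAGS(&tfh));    /* 22 */
	return 0;
}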
/*
* RX related structures and functions
@@ -192,41 +193,9 @@ struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
u32 flags;
+ u32 tbs;
};
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
@@ -273,13 +242,32 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid over the HW queue.
*/
struct iwl_txq {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
+ void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
@@ -294,6 +282,14 @@ struct iwl_txq {
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
};
static inline dma_addr_t
@@ -309,6 +305,16 @@ struct iwl_tso_hdr_page {
};
/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+ IWL_SHARED_IRQ_NON_RX = BIT(0),
+ IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -326,7 +332,6 @@ struct iwl_tso_hdr_page {
* @rx_buf_size: Rx buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: true when ucode supports wide command header format
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
@@ -338,8 +343,10 @@ struct iwl_tso_hdr_page {
* @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
- * @allocated_vector: the number of interrupt vector allocated by the OS
- * @default_irq_num: default irq for non rx interrupt
+ * @shared_vec_mask: the type of causes the shared vector handles
+ * (see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
@@ -391,11 +398,12 @@ struct iwl_trans_pcie {
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+ u8 max_tbs;
+ u16 tfd_size;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
u32 rx_page_order;
@@ -410,12 +418,14 @@ struct iwl_trans_pcie {
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
- u32 allocated_vector;
- u32 default_irq_num;
+ u8 shared_vec_mask;
+ u32 alloc_vecs;
+ u32 def_irq;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
+ cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};
static inline struct iwl_trans_pcie *
@@ -474,6 +484,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -486,11 +497,20 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
+ u8 idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- return le16_to_cpu(tb->hi_n_len) >> 4;
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+ }
}
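As a hedged aside, not part of the patch: the legacy TFD packs a 36-bit DMA address plus a 12-bit length into tb->lo and the 16-bit hi_n_len word, which is why the length accessor above shifts right by 4. A self-contained round-trip sketch (the demo_* names are invented here, and the kernel's byte-order conversions are dropped for brevity):

#include <stdint.h>
#include <stdio.h>

/* legacy-TFD style packing (assumed layout): hi_n_len is
 * [15:4] = length, [3:0] = bits 35:32 of the DMA address */
struct demo_tb {
	uint32_t lo;       /* address bits 31:0 */
	uint16_t hi_n_len; /* address bits 35:32 | len << 4 */
};

static void demo_set_tb(struct demo_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t demo_get_addr(const struct demo_tb *tb)
{
	return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t demo_get_len(const struct demo_tb *tb)
{
	return tb->hi_n_len >> 4; /* mirrors iwl_pcie_tfd_tb_get_len() */
}

int main(void)
{
	struct demo_tb tb;

	demo_set_tb(&tb, 0x912345678ULL, 128); /* a 36-bit address */
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)demo_get_addr(&tb), demo_get_len(&tb));
	return 0;
}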
/*****************************************************
@@ -617,9 +637,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
@@ -628,22 +648,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->q.id);
+ txq->id);
}
-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
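To make the n_window kernel-doc above concrete, a small sketch of get_cmd_index() with assumed sizes (a 256-entry HW ring and a 32-entry command-queue window, as that comment describes):

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX 256 /* HW ring size, per the comment above */
#define CMD_SLOTS           32 /* n_window for the command queue */

/* mirrors get_cmd_index(): n_window must be a power of two */
static unsigned get_cmd_index_demo(unsigned n_window, unsigned index)
{
	return index & (n_window - 1);
}

int main(void)
{
	/* HW entries 5, 37, 69, ..., 229 all fold onto SW entry 5 */
	for (unsigned hw = 5; hw < TFD_QUEUE_SIZE_MAX; hw += CMD_SLOTS)
		printf("HW %3u -> SW %2u\n", hw,
		       get_cmd_index_demo(CMD_SLOTS, hw));
	return 0;
}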
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 5c36e6d00622..6fe5546dc773 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -487,15 +487,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
while (pending) {
int i;
- struct list_head local_allocated;
+ LIST_HEAD(local_allocated);
gfp_t gfp_mask = GFP_KERNEL;
/* Do not post a warning if there are only a few requests */
if (pending < RX_PENDING_WATERMARK)
gfp_mask |= __GFP_NOWARN;
- INIT_LIST_HEAD(&local_allocated);
-
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;
@@ -1108,13 +1106,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
FH_RSCSR_RXQ_POS != rxq->id);
IWL_DEBUG_RX(trans,
- "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ "cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxcb._offset,
iwl_get_cmd_string(trans,
iwl_cmd_id(pkt->hdr.cmd,
pkt->hdr.group_id,
0)),
- pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
@@ -1142,7 +1141,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
if (rxq->id == 0)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
@@ -1885,6 +1884,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 0);
+ local_bh_enable();
+ }
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 1);
+ local_bh_enable();
+ }
+
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 74f2f035bd28..ae95533e587d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
if (ret)
return ret;
- /* Notify the ucode of the loaded section number and status */
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ /* Notify ucode of loaded section number and status */
+ if (trans->cfg->use_tfh) {
+ val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
+ } else {
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ }
sec_num = (sec_num << 1) | 0x1;
}
@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (cpu == 1)
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
- else
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+ if (trans->cfg->use_tfh) {
+ if (cpu == 1)
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ } else {
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ }
return 0;
}
@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return ret;
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- (LMPM_CPU_UCODE_LOADING_COMPLETED |
- LMPM_CPU_HDRS_LOADING_COMPLETED |
- LMPM_CPU_UCODE_LOADING_STARTED) <<
- shift_param);
-
*first_ucode_section = last_read_idx;
return 0;
@@ -1161,7 +1170,7 @@ static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++)
+ for (i = 0; i < trans_pcie->alloc_vecs; i++)
synchronize_irq(trans_pcie->msix_entries[i].vector);
} else {
synchronize_irq(trans_pcie->pci_dev->irq);
@@ -1420,13 +1429,58 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ int i;
+
+ /*
+ * Access all non RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - fallback queue, which is designated for
+ * management frame, command responses etc, is always mapped to the
+ * first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
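A hedged sketch of the routing the two helpers above set up, with the IVAR register writes replaced by printf; the IWL_SHARED_IRQ_* semantics follow the enum added to internal.h in this patch:

#include <stdio.h>

#define IWL_SHARED_IRQ_NON_RX    (1 << 0)
#define IWL_SHARED_IRQ_FIRST_RSS (1 << 1)

/* mirrors iwl_pcie_map_rx_causes(): print which interrupt cause
 * each RX queue is routed to, instead of writing the IVAR table */
static void demo_map_rx_causes(unsigned num_rx_queues, unsigned shared_mask)
{
	unsigned offset = (shared_mask & IWL_SHARED_IRQ_FIRST_RSS) ? 1 : 0;

	printf("RX queue 0 (fallback) -> cause Q0%s\n",
	       (shared_mask & IWL_SHARED_IRQ_NON_RX) ?
	       " (non-auto-clear, shared with non-RX)" : "");
	for (unsigned idx = 1; idx < num_rx_queues; idx++)
		printf("RX queue %u -> cause Q%u\n", idx, idx - offset);
}

int main(void)
{
	puts("-- enough vectors --");
	demo_map_rx_causes(4, 0);
	puts("-- one vector short: first vector also takes non-RX --");
	demo_map_rx_causes(4, IWL_SHARED_IRQ_NON_RX);
	puts("-- two short: first vector takes non-RX and first RSS --");
	demo_map_rx_causes(4, IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS);
	return 0;
}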
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
- u32 val, max_rx_vector, i;
struct iwl_trans *trans = trans_pcie->trans;
- max_rx_vector = trans_pcie->allocated_vector - 1;
-
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
@@ -1437,25 +1491,16 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
- * Each cause from the list above and the RX causes is represented as
- * a byte in the IVAR table. We access the first (N - 1) bytes and map
- * them to the (N - 1) vectors so these vectors will be used as rx
- * vectors. Then access all non rx causes and map them to the
- * default queue (N'th queue).
+ * Each cause from the causes list above and the RX causes is
+ * represented as a byte in the IVAR table. The first nibble
+ * represents the bound interrupt vector of the cause, the second
+ * represents no auto clear for this cause. This will be set if its
+ * interrupt vector is bound to serve other causes.
*/
- for (i = 0; i < max_rx_vector; i++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
- iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
- BIT(MSIX_FH_INT_CAUSES_Q(i)));
- }
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- val = trans_pcie->default_irq_num |
- MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
- }
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
@@ -1468,40 +1513,55 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_irqs, num_irqs, i, ret, nr_online_cpus;
u16 pci_cmd;
- int max_vector;
- int ret, i;
-
- if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 2),
- IWL_MAX_RX_HW_QUEUES);
- for (i = 0; i < max_vector; i++)
- trans_pcie->msix_entries[i].entry = i;
-
- ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
- MSIX_MIN_INTERRUPT_VECTORS,
- max_vector);
- if (ret > 1) {
- IWL_DEBUG_INFO(trans,
- "Enable MSI-X allocate %d interrupt vector\n",
- ret);
- trans_pcie->allocated_vector = ret;
- trans_pcie->default_irq_num =
- trans_pcie->allocated_vector - 1;
- trans_pcie->trans->num_rx_queues =
- trans_pcie->allocated_vector - 1;
- trans_pcie->msix_enabled = true;
-
- return;
- }
+
+ if (!trans->cfg->mq_rx_supported)
+ goto enable_msi;
+
+ nr_online_cpus = num_online_cpus();
+ max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ for (i = 0; i < max_irqs; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_irqs);
+ if (num_irqs < 0) {
IWL_DEBUG_INFO(trans,
- "ret = %d %s move to msi mode\n", ret,
- (ret == 1) ?
- "can't allocate more than 1 interrupt vector" :
- "failed to enable msi-x mode");
- pci_disable_msix(pdev);
+ "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+ num_irqs);
+ goto enable_msi;
}
+ trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled. %d interrupt vectors were allocated\n",
+ num_irqs);
+ /*
+ * In case the OS provides fewer interrupts than requested, different
+ * causes will share the same interrupt vector as follows:
+ * One interrupt less: non rx causes shared with FBQ.
+ * Two interrupts less: non rx causes shared with FBQ and RSS.
+ * More than two interrupts: we will use fewer RSS queues.
+ */
+ if (num_irqs <= nr_online_cpus) {
+ trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+ IWL_SHARED_IRQ_FIRST_RSS;
+ } else if (num_irqs == nr_online_cpus + 1) {
+ trans_pcie->trans->num_rx_queues = num_irqs;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+ } else {
+ trans_pcie->trans->num_rx_queues = num_irqs - 1;
+ }
+
+ trans_pcie->alloc_vecs = num_irqs;
+ trans_pcie->msix_enabled = true;
+ return;
+
+enable_msi:
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
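The fallback policy spelled out in the comment above can be exercised with a few lines of plain C; this is a sketch of the branch logic only, and the MSI fallback path is elided:

#include <stdio.h>

#define IWL_SHARED_IRQ_NON_RX    (1 << 0)
#define IWL_SHARED_IRQ_FIRST_RSS (1 << 1)

/* mirrors the num_irqs handling in iwl_pcie_set_interrupt_capa() */
static void demo_vec_policy(int num_irqs, int nr_online_cpus)
{
	int num_rx_queues, shared = 0;

	if (num_irqs <= nr_online_cpus) {
		num_rx_queues = num_irqs + 1;
		shared = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == nr_online_cpus + 1) {
		num_rx_queues = num_irqs;
		shared = IWL_SHARED_IRQ_NON_RX;
	} else { /* num_irqs >= nr_online_cpus + 2 */
		num_rx_queues = num_irqs - 1;
	}
	printf("irqs=%d cpus=%d -> rx queues=%d shared mask=0x%x\n",
	       num_irqs, nr_online_cpus, num_rx_queues, shared);
}

int main(void)
{
	demo_vec_policy(6, 4); /* got all requested vectors */
	demo_vec_policy(5, 4); /* one short: non-RX shares with FBQ */
	demo_vec_policy(4, 4); /* two short: FBQ also takes first RSS */
	return 0;
}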
@@ -1514,36 +1574,57 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
}
}
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+{
+ int iter_rx_q, i, ret, cpu, offset;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+ iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ offset = 1 + i;
+ for (; i < iter_rx_q ; i++) {
+ /*
+ * Get the cpu prior to the place to search
+ * (i.e. return will be > i - 1).
+ */
+ cpu = cpumask_next(i - offset, cpu_online_mask);
+ cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+ ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->affinity_mask[i]);
+ if (ret)
+ IWL_ERR(trans_pcie->trans,
+ "Failed to set affinity mask for IRQ %d\n",
+ i);
+ }
+}
+
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
- int i, last_vector;
-
- last_vector = trans_pcie->trans->num_rx_queues;
+ int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++) {
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
int ret;
-
- ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
- iwl_pcie_msix_isr,
- (i == last_vector) ?
- iwl_pcie_irq_msix_handler :
- iwl_pcie_irq_rx_msix_handler,
- IRQF_SHARED,
- DRV_NAME,
- &trans_pcie->msix_entries[i]);
+ struct msix_entry *msix_entry;
+
+ msix_entry = &trans_pcie->msix_entries[i];
+ ret = devm_request_threaded_irq(&pdev->dev,
+ msix_entry->vector,
+ iwl_pcie_msix_isr,
+ (i == trans_pcie->def_irq) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ DRV_NAME,
+ msix_entry);
if (ret) {
- int j;
-
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
- for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[j].vector,
- &trans_pcie->msix_entries[j]);
- pci_disable_msix(pdev);
+
return ret;
}
}
+ iwl_pcie_irq_set_affinity(trans_pcie->trans);
return 0;
}
@@ -1672,7 +1753,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
- trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
@@ -1703,22 +1783,16 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
- for (i = 0; i < trans_pcie->allocated_vector; i++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ irq_set_affinity_hint(
+ trans_pcie->msix_entries[i].vector,
+ NULL);
+ }
- pci_disable_msix(trans_pcie->pci_dev);
trans_pcie->msix_enabled = false;
} else {
- free_irq(trans_pcie->pci_dev->irq, trans);
-
iwl_pcie_free_ict(trans);
-
- pci_disable_msi(trans_pcie->pci_dev);
}
- iounmap(trans_pcie->hw_base);
- pci_release_regions(trans_pcie->pci_dev);
- pci_disable_device(trans_pcie->pci_dev);
iwl_pcie_free_fw_monitor(trans);
@@ -1890,7 +1964,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
txq->frozen = freeze;
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
goto next_queue;
if (freeze) {
@@ -1938,7 +2012,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
txq->block--;
if (!txq->block) {
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (i << 8));
+ txq->write_ptr | (i << 8));
}
} else if (block) {
txq->block++;
@@ -1958,10 +2032,14 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
int cnt;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
+ txq->read_ptr, txq->write_ptr);
+
+ if (trans->cfg->use_tfh)
+ /* TODO: access new SCD registers and dump them */
+ return;
scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ SCD_TX_STTS_QUEUE_OFFSET(txq->id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -1996,7 +2074,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
int ret = 0;
@@ -2014,13 +2091,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
- wr_ptr = ACCESS_ONCE(q->write_ptr);
+ wr_ptr = ACCESS_ONCE(txq->write_ptr);
- while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2029,7 +2105,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
usleep_range(1000, 2000);
}
- if (q->read_ptr != q->write_ptr) {
+ if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
@@ -2197,7 +2273,6 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
char *buf;
int pos = 0;
int cnt;
@@ -2215,10 +2290,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, q->read_ptr, q->write_ptr,
+ cnt, txq->read_ptr, txq->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
txq->need_update, txq->frozen,
@@ -2424,13 +2498,14 @@ err:
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < IWL_NUM_OF_TBS; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+ for (i = 0; i < trans_pcie->max_tbs; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
@@ -2645,7 +2720,7 @@ static struct iwl_trans_dump_data
/* host commands */
len += sizeof(*data) +
- cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (trans_pcie->fw_mon_page) {
@@ -2713,12 +2788,13 @@ static struct iwl_trans_dump_data
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
- ptr = cmdq->q.write_ptr;
- for (i = 0; i < cmdq->q.n_window; i++) {
- u8 idx = get_cmd_index(&cmdq->q, ptr);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = get_cmd_index(cmdq, ptr);
u32 caplen, cmdlen;
- cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+ trans_pcie->tfd_size * ptr);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -2788,6 +2864,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
+ .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
+
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
@@ -2821,13 +2899,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans *trans;
int ret, addr_size;
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
-
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
@@ -2841,9 +2921,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -2861,6 +2938,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
else
addr_size = 36;
+ if (cfg->use_tfh) {
+ trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+
+ } else {
+ trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
@@ -2875,21 +2962,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto out_pci_disable_device;
+ goto out_no_pci;
}
}
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- goto out_pci_disable_device;
+ dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
+ goto out_no_pci;
}
- trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
if (!trans_pcie->hw_base) {
- dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pcim_iomap_table failed\n");
ret = -ENODEV;
- goto out_pci_release_regions;
+ goto out_no_pci;
}
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2916,7 +3003,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = iwl_pcie_prepare_card_hw(trans);
if (ret) {
IWL_WARN(trans, "Exit HW not ready\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
/*
@@ -2933,7 +3020,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
if (iwl_trans_grab_nic_access(trans, &flags)) {
@@ -2965,15 +3052,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (trans_pcie->msix_enabled) {
if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
- goto out_pci_release_regions;
+ goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
if (ret)
- goto out_pci_disable_msi;
+ goto out_no_pci;
- ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
@@ -2991,12 +3079,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
-out_pci_disable_msi:
- pci_disable_msi(pdev);
-out_pci_release_regions:
- pci_release_regions(pdev);
-out_pci_disable_device:
- pci_disable_device(pdev);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
iwl_trans_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 18650dccdb58..e9a278b60dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,7 +71,7 @@
*
***************************************************/
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
@@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
q->n_window = slots_num;
q->id = id;
@@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
spin_lock(&txq->lock);
/* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_unlock(&txq->lock);
return;
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
jiffies_to_msecs(txq->wd_timeout));
iwl_trans_pcie_log_scd_error(trans, txq);
@@ -176,22 +176,21 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
* iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt)
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
u8 sec_ctl = 0;
- u8 sta_id = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+ (void *)txq->entries[txq->write_ptr].cmd->payload;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -205,14 +204,32 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
-
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- bc_ent = cpu_to_le16(len | (sta_id << 12));
+ if (trans->cfg->use_tfh) {
+ u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the
+ * TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM- 0 for one chunk, 1 for 2 and so on.
+ * If, for example, TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched
+ */
+ u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ } else {
+ u8 sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+ }
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
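A quick check of the fetch-chunk arithmetic in the comment above. The sizes here (a 2-byte num_tbs header and 10 bytes per TB descriptor) are assumptions chosen to match the comment's "3 TBs use 32 bytes" example rather than values taken from this patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* assumed: 2-byte num_tbs header, 10 bytes per TB descriptor */
	const unsigned tb_size = 10, tbs_offset = 2;
	const unsigned counts[] = { 3, 6, 7, 25 };

	for (unsigned i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned filled = tbs_offset + counts[i] * tb_size;
		/* 0 means one 64-byte SRAM fetch, 1 means two, ... */
		unsigned chunks = DIV_ROUND_UP(filled, 64) - 1;

		printf("%2u TBs -> %3u bytes -> num_fetch_chunks = %u\n",
		       counts[i], filled, chunks);
	}
	return 0;
}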
@@ -227,12 +244,12 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ (void *)txq->entries[read_ptr].cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -240,6 +257,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
@@ -255,7 +273,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
- int txq_id = txq->q.id;
+ int txq_id = txq->id;
lockdep_assert_held(&txq->lock);
@@ -289,10 +307,10 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* if not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx).
*/
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
if (!txq->block)
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ txq->write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
@@ -312,49 +330,93 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
+{
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ dma_addr_t hi_len;
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
- return addr;
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+ }
}
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- tfd->num_tbs = idx + 1;
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd_fh->num_tbs = idx + 1;
+ }
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- return tfd->num_tbs & 0x1f;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+
+ return tfd->num_tbs & 0x1f;
+ }
}
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_txq *txq, int index)
{
- int i;
- int num_tbs;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
/* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
@@ -363,18 +425,30 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
/* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
- if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ if (meta->tbs & BIT(i))
dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd, i),
DMA_TO_DEVICE);
else
dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd,
+ i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd,
+ i),
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
+
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
+
}
/*
@@ -388,20 +462,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
*/
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_tfd *tfd_tmp = txq->tfds;
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
- int rd_ptr = txq->q.read_ptr;
- int idx = get_cmd_index(&txq->q, rd_ptr);
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
@@ -423,23 +495,21 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *tfd;
u32 num_tbs;
- q = &txq->q;
- tfd_tmp = txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
+ tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
if (reset)
- memset(tfd, 0, sizeof(*tfd));
+ memset(tfd, 0, trans_pcie->tfd_size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ trans_pcie->max_tbs);
return -EINVAL;
}
@@ -447,7 +517,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -457,7 +527,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
u32 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
size_t tb0_buf_sz;
int i;
@@ -468,7 +538,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
(unsigned long)txq);
txq->trans_pcie = trans_pcie;
- txq->q.n_window = slots_num;
+ txq->n_window = slots_num;
txq->entries = kcalloc(slots_num,
sizeof(struct iwl_pcie_txq_entry),
@@ -489,7 +559,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
+ &txq->dma_addr, GFP_KERNEL);
if (!txq->tfds)
goto error;
@@ -503,11 +573,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->q.id = txq_id;
+ txq->id = txq_id;
return 0;
err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
@@ -531,7 +601,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num, txq_id);
if (ret)
return ret;
@@ -545,10 +615,10 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
return 0;
}
@@ -595,15 +665,14 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
+ while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, q->read_ptr);
+ txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) {
- struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -611,15 +680,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
if (txq_id != trans_pcie->cmd_queue) {
IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- q->id);
+ txq->id);
iwl_trans_unref(trans);
} else {
iwl_pcie_clear_cmd_in_flight(trans);
@@ -663,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
+ for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
}
@@ -671,13 +740,13 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
- txq->tfds, txq->q.dma_addr);
- txq->q.dma_addr = 0;
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
@@ -703,6 +772,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ if (trans->cfg->use_tfh)
+ return;
+
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -758,14 +830,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
- txq->q.read_ptr = 0;
- txq->q.write_ptr = 0;
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
}
/* Tell NIC where to find the "keep warm" buffer */
@@ -970,11 +1042,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
- if (trans->cfg->use_tfh)
+ if (trans->cfg->use_tfh) {
iwl_write_direct32(trans, TFH_TRANSFER_MODE,
TFH_TRANSFER_MAX_PENDING_REQ |
TFH_CHUNK_SIZE_128 |
TFH_CHUNK_SPLIT_MODE);
+ return 0;
+ }
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
@@ -1007,7 +1081,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
* if empty delete timer, otherwise move timer forward
* since we're making progress on this queue
*/
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
del_timer(&txq->stuck_timer);
else
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
@@ -1020,7 +1094,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
- struct iwl_queue *q = &txq->q;
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1035,21 +1108,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->q.read_ptr == tfd_num)
+ if (txq->read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
+ txq_id, txq->read_ptr, tfd_num, ssn);
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
last_to_free = iwl_queue_dec_wrap(tfd_num);
- if (!iwl_queue_used(q, last_to_free)) {
+ if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1057,9 +1130,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
- struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+ txq->read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1068,16 +1141,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ txq->entries[txq->read_ptr].skb = NULL;
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ if (!trans->cfg->use_tfh)
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ if (iwl_queue_space(txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1109,12 +1183,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ if (iwl_queue_space(txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
- if (q->read_ptr == q->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ if (txq->read_ptr == txq->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
iwl_trans_unref(trans);
}
@@ -1176,31 +1250,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
+ idx, txq->write_ptr, txq->read_ptr);
iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1249,6 +1322,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+ if (cfg && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) {
@@ -1283,14 +1359,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = txq->q.read_ptr;
+ ssn = txq->read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->q.read_ptr = (ssn & 0xff);
- txq->q.write_ptr = (ssn & 0xff);
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
@@ -1343,6 +1419,14 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
txq->ampdu = !shared_mode;
}
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->scd_bc_tbls.dma +
+ txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
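
The new helper above exposes the DMA address of one queue's slice of the scheduler byte-count region by plain offset arithmetic: all per-queue tables live in one contiguous allocation. A sketch of the same indexing; the entry size here is made up for illustration (the real stride is sizeof(struct iwlagn_scd_bc_tbl)):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical per-queue table size, for illustration only. */
    #define BC_TBL_ENTRY_SIZE 1024u

    /* A queue's byte-count table is found by offsetting into one shared
     * DMA region, as iwl_trans_pcie_get_txq_byte_table() does above. */
    static uint64_t txq_byte_table(uint64_t region_dma, int txq)
    {
        return region_dma + (uint64_t)txq * BC_TBL_ENTRY_SIZE;
    }

    int main(void)
    {
        uint64_t base = 0x100000; /* made-up DMA base address */

        printf("queue 0: 0x%llx\n", (unsigned long long)txq_byte_table(base, 0));
        printf("queue 3: 0x%llx\n", (unsigned long long)txq_byte_table(base, 3));
        return 0;
    }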
+
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1366,6 +1450,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return;
}
+ if (configure_scd && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
@@ -1395,7 +1482,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1410,7 +1496,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- if (WARN(!trans_pcie->wide_cmd_header &&
+ if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1494,7 +1580,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1503,7 +1589,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- idx = get_cmd_index(q, q->write_ptr);
+ idx = get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
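
get_cmd_index(), now taking the txq directly, maps a TFD-ring index into the smaller entries[] window of the command queue. A sketch assuming, as in the driver, that the window size is a power of two:

    #include <stdio.h>

    /* The command queue's entries[] array is smaller than the TFD ring;
     * the ring index is masked down into that window (illustrative
     * version of get_cmd_index()). */
    static int get_cmd_index(int n_window, int index)
    {
        return index & (n_window - 1);
    }

    int main(void)
    {
        int n_window = 32;

        printf("%d\n", get_cmd_index(n_window, 5));   /* 5 */
        printf("%d\n", get_cmd_index(n_window, 200)); /* 200 mod 32 = 8 */
        return 0;
    }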
@@ -1522,7 +1608,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
@@ -1530,7 +1616,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
cmd_pos = sizeof(struct iwl_cmd_header);
@@ -1580,7 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1596,8 +1682,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1620,8 +1706,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1629,8 +1715,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
- BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
- sizeof(out_meta->flags) * BITS_PER_BYTE);
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
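
The BUILD_BUG_ON change reflects moving the transfer-buffer bookkeeping out of out_meta->flags into a dedicated out_meta->tbs bitmap wide enough for IWL_TFH_NUM_TBS. A sketch of how such a bitmap drives the unmap path (field width and TB numbers here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        /* Dedicated bitmap of transfer buffers, standing in for the new
         * out_meta->tbs field: one bit per TB that needs unmapping. */
        uint32_t tbs = 0;

        tbs |= BIT(2); /* e.g. a mapped skb fragment placed in TB 2 */
        tbs |= BIT(3);

        /* On completion, only the flagged TBs are DMA-unmapped. */
        for (int i = 0; i < 32; i++)
            if (tbs & BIT(i))
                printf("unmap TB %d\n", i);

        return 0;
    }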
@@ -1639,7 +1724,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
@@ -1651,7 +1736,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
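
Both the wide and the short command headers encode the same sequence field: queue id in the upper bits, ring index in the low byte. A sketch of that packing; the masks follow the driver's QUEUE_TO_SEQ/INDEX_TO_SEQ macros, but treat the exact field widths as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: queue id in bits 8..12, ring index in bits 0..7. */
    #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
    #define INDEX_TO_SEQ(i) ((i) & 0xff)

    int main(void)
    {
        int cmd_queue = 9, write_ptr = 42;
        uint16_t seq = QUEUE_TO_SEQ(cmd_queue) | INDEX_TO_SEQ(write_ptr);

        /* The completion path reverses this to locate the queue and the
         * entry to reclaim. */
        printf("seq=0x%04x queue=%u index=%u\n",
               seq, (seq >> 8) & 0x1f, seq & 0xff);
        return 0;
    }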
@@ -1689,20 +1774,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock_bh(&txq->lock);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+ iwl_pcie_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -1815,14 +1900,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
@@ -1900,7 +1984,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 tb2_len;
int i;
@@ -1915,8 +1999,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1935,19 +2019,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
- out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ out_meta->tbs |= BIT(tb_idx);
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
@@ -2008,7 +2092,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- struct iwl_queue *q = &txq->q;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
@@ -2022,8 +2105,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
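
The tracepoint updates above stop assuming a fixed struct iwl_tfd: with two descriptor layouts in play, the TFD for a slot is found with byte arithmetic on the runtime trans_pcie->tfd_size, which is what iwl_pcie_get_tfd() does. A sketch with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Locate ring slot idx by byte arithmetic on a runtime descriptor
     * size, instead of indexing a fixed struct array (illustrative
     * version of iwl_pcie_get_tfd(); sizes below are not the real
     * structure sizes). */
    static void *get_tfd(void *tfds, size_t tfd_size, int idx)
    {
        return (uint8_t *)tfds + tfd_size * idx;
    }

    int main(void)
    {
        uint8_t ring[256 * 128]; /* pretend descriptor ring */
        size_t legacy_size = 64, tfh_size = 128;

        printf("legacy slot 3 offset: %td\n",
               (uint8_t *)get_tfd(ring, legacy_size, 3) - ring);
        printf("tfh    slot 3 offset: %td\n",
               (uint8_t *)get_tfd(ring, tfh_size, 3) - ring);
        return 0;
    }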
@@ -2179,7 +2262,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
return 0;
out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
return ret;
}
#else /* CONFIG_INET */
@@ -2203,9 +2286,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
- struct iwl_queue *q;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
+ void *tfd;
u16 len, tb1_len;
bool wait_write_ptr;
__le16 fc;
@@ -2214,7 +2297,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
bool amsdu;
txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
@@ -2236,7 +2318,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -2249,11 +2331,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(q) < q->high_mark) {
+ if (iwl_queue_space(txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(q) < 3)) {
+ if (unlikely(iwl_queue_space(txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2274,19 +2356,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != q->write_ptr,
+ (wifi_seq & 0xff) != txq->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
+ txq_id, wifi_seq, txq->write_ptr);
/* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2294,7 +2376,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta = &txq->entries[txq->write_ptr].meta;
out_meta->flags = 0;
/*
@@ -2319,7 +2401,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
@@ -2344,13 +2426,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
}
+ tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_tfd_get_num_tbs(trans, tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
@@ -2364,12 +2448,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
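
The stop/wake decisions earlier in iwl_trans_pcie_tx() compare free space against high_mark and low_mark, and this final write_ptr increment publishes the slot just filled to the device. A sketch of the space computation, modelled on the driver's iwl_queue_space() with one ring slot kept in reserve so a full ring is distinguishable from an empty one (the mark values below are illustrative):

    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256 /* power-of-two ring, as in the driver */

    /* Free slots in the ring, keeping one slot reserved (modelled on
     * iwl_queue_space(); the real helper also caps this at the queue
     * window size). */
    static int queue_space(int read_ptr, int write_ptr)
    {
        int used = (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

        return (TFD_QUEUE_SIZE_MAX - 1) - used;
    }

    int main(void)
    {
        int high_mark = 64, low_mark = 32; /* illustrative marks */
        int read_ptr = 0, write_ptr = 200;

        if (queue_space(read_ptr, write_ptr) < high_mark)
            printf("stop queue: %d slots left\n",
                   queue_space(read_ptr, write_ptr));

        read_ptr = 100; /* reclaim frees entries... */
        if (queue_space(read_ptr, write_ptr) > low_mark)
            printf("wake queue: %d slots left\n",
                   queue_space(read_ptr, write_ptr));
        return 0;
    }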