Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c      |  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 59
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c       | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c    | 45
4 files changed, 86 insertions, 33 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index bc83d2ba55c6..e8687683ff29 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1196,6 +1196,9 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
         case REG_CRF_ID_TYPE_FMR:
                 iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
                 break;
+        case REG_CRF_ID_TYPE_WHP:
+                iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
+                break;
         default:
                 ret = -EIO;
                 IWL_ERR(iwl_trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 0f6493dab8cb..56def20374f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -58,10 +58,7 @@ struct iwl_rx_mem_buffer {
         bool invalid;
 };
 
-/**
- * struct isr_statistics - interrupt statistics
- *
- */
+/* interrupt statistics */
 struct isr_statistics {
         u32 hw;
         u32 sw;
@@ -127,6 +124,8 @@ struct iwl_rx_completion_desc_bz {
  * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
+ * @write_actual: actual write pointer written to device, since we update in
+ *      blocks of 8 only
  * @free_count: Number of pre-allocated buffers in rx_free
  * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
@@ -135,10 +134,12 @@ struct iwl_rx_completion_desc_bz {
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
- * @lock:
+ * @lock: per-queue lock
  * @queue: actual rx queue. Not used for multi-rx queue.
  * @next_rb_is_fragment: indicates that the previous RB that we handled set
  *      the fragmented flag, so the next one is still another fragment
+ * @napi: NAPI struct for this queue
+ * @queue_size: size of this queue
  *
  * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
@@ -188,19 +189,20 @@ struct iwl_rb_allocator {
 
 /**
  * iwl_get_closed_rb_stts - get closed rb stts from different structs
- * @rxq - the rxq to get the rb stts from
+ * @trans: transport pointer (for configuration)
+ * @rxq: the rxq to get the rb stts from
  */
-static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
-                                            struct iwl_rxq *rxq)
+static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+                                         struct iwl_rxq *rxq)
 {
         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                 __le16 *rb_stts = rxq->rb_stts;
 
-                return READ_ONCE(*rb_stts);
+                return le16_to_cpu(READ_ONCE(*rb_stts));
         } else {
                 struct iwl_rb_status *rb_stts = rxq->rb_stts;
 
-                return READ_ONCE(rb_stts->closed_rb_num);
+                return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
         }
 }
 
@@ -243,6 +245,7 @@ enum iwl_image_response_code {
         IWL_IMAGE_RESP_FAIL = 2,
 };
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 /**
  * struct cont_rec: continuous recording data structure
  * @prev_wr_ptr: the last address that was read in monitor_data
@@ -253,7 +256,6 @@ enum iwl_image_response_code {
  *      in &iwl_fw_mon_dbgfs_state enum
  * @mutex: locked while reading from monitor_data debugfs file
  */
-#ifdef CONFIG_IWLWIFI_DEBUGFS
 struct cont_rec {
         u32 prev_wr_ptr;
         u32 prev_wrap_cnt;
@@ -298,10 +300,6 @@ enum iwl_pcie_imr_status {
  * @prph_info_dma_addr: dma addr of prph info
  * @prph_scratch_dma_addr: dma addr of prph scratch
  * @ctxt_info_dma_addr: dma addr of context information
- * @init_dram: DRAM data of firmware image (including paging).
- *      Context information addresses will be taken from here.
- *      This is driver's local copy for keeping track of size and
- *      count for allocating and freeing the memory.
  * @iml: image loader image virtual address
  * @iml_dma_addr: image loader image DMA address
  * @trans: pointer to the generic transport area
@@ -321,10 +319,9 @@ enum iwl_pcie_imr_status {
  * @rx_buf_bytes: RX buffer (RB) size in bytes
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
- * @cmd_in_flight: true when we have a host command in flight
-#ifdef CONFIG_IWLWIFI_DEBUGFS
  * @fw_mon_data: fw continuous recording data
-#endif
+ * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
+ *      during commands in flight
  * @msix_entries: array of MSI-X entries
  * @msix_enabled: true if managed to enable MSI-X
  * @shared_vec_mask: the type of causes the shared vector handles
@@ -344,8 +341,32 @@ enum iwl_pcie_imr_status {
  * @alloc_page: allocated page to still use parts of
  * @alloc_page_used: how much of the allocated page was already used (bytes)
  * @imr_status: imr dma state machine
- * @wait_queue_head_t: imr wait queue for dma completion
+ * @imr_waitq: imr wait queue for dma completion
  * @rf_name: name/version of the CRF, if any
+ * @use_ict: whether or not ICT (interrupt table) is used
+ * @ict_index: current ICT read index
+ * @ict_tbl: ICT table pointer
+ * @ict_tbl_dma: ICT table DMA address
+ * @inta_mask: interrupt (INT-A) mask
+ * @irq_lock: lock to synchronize IRQ handling
+ * @txq_memory: TXQ allocation array
+ * @sx_waitq: waitqueue for Sx transitions
+ * @sx_complete: completion for Sx transitions
+ * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
+ * @opmode_down: indicates opmode went away
+ * @num_rx_bufs: number of RX buffers to allocate/use
+ * @no_reclaim_cmds: special commands not using reclaim flow
+ *      (firmware workaround)
+ * @n_no_reclaim_cmds: number of special commands not using reclaim flow
+ * @affinity_mask: IRQ affinity mask for each RX queue
+ * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
+ *      enable/disable
+ * @fw_reset_handshake: indicates FW reset handshake is needed
+ * @fw_reset_state: state of FW reset handshake
+ * @fw_reset_waitq: waitqueue for FW reset handshake
+ * @is_down: indicates the NIC is down
+ * @isr_stats: interrupt statistics
+ * @napi_dev: (fake) netdev for NAPI registration
  */
 struct iwl_trans_pcie {
         struct iwl_rxq *rxq;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4614acee9f7b..146bc7bd14fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1510,7 +1510,7 @@ restart:
         spin_lock(&rxq->lock);
         /* uCode's read index (stored in shared DRAM) indicates the last Rx
          * buffer that the driver may process (last buffer filled by ucode).
          */
-        r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+        r = iwl_get_closed_rb_stts(trans, rxq);
         i = rxq->read;
         /* W/A 9000 device step A0 wrap-around bug */
@@ -1660,9 +1660,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
         IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
 
         local_bh_disable();
-        if (napi_schedule_prep(&rxq->napi))
-                __napi_schedule(&rxq->napi);
-        else
+        if (!napi_schedule(&rxq->napi))
                 iwl_pcie_clear_irq(trans, entry->entry);
         local_bh_enable();
 
@@ -2291,6 +2289,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
         else
                 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
 
+        if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
+                IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
+                        inta_hw);
+                /* TODO: PLDR flow required here for >= Bz */
+        }
+
         /* Error detected by uCode */
         if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
                 IWL_ERR(trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 198933f853c5..385e152f04fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1111,6 +1111,7 @@ static const struct iwl_causes_list causes_list_common[] = {
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+        IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
         IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
@@ -2112,8 +2113,11 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
         pci_lock_rescan_remove();
         pci_dev_put(pdev);
         pci_stop_and_remove_bus_device(pdev);
-        if (removal->rescan)
-                pci_rescan_bus(bus->parent);
+        if (removal->rescan && bus) {
+                if (bus->parent)
+                        bus = bus->parent;
+                pci_rescan_bus(bus);
+        }
         pci_unlock_rescan_remove();
 
         kfree(removal);
@@ -2173,6 +2177,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
                 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
         u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
 
+        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+                return false;
+
         spin_lock(&trans_pcie->reg_lock);
 
         if (trans_pcie->cmd_hold_nic_awake)
@@ -2285,6 +2292,8 @@ out:
 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                    void *buf, int dwords)
 {
+#define IWL_MAX_HW_ERRS 5
+        unsigned int num_consec_hw_errors = 0;
         int offs = 0;
         u32 *vals = buf;
 
@@ -2300,6 +2309,17 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                 while (offs < dwords) {
                         vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+
+                        if (iwl_trans_is_hw_error_value(vals[offs]))
+                                num_consec_hw_errors++;
+                        else
+                                num_consec_hw_errors = 0;
+
+                        if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
+                                iwl_trans_release_nic_access(trans);
+                                return -EIO;
+                        }
+
                         offs++;
 
                         if (time_after(jiffies, end)) {
@@ -2712,11 +2732,9 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
                                  rxq->free_count);
                 if (rxq->rb_stts) {
-                        u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
-                                                                     rxq));
+                        u32 r = iwl_get_closed_rb_stts(trans, rxq);
                         pos += scnprintf(buf + pos, bufsz - pos,
-                                         "\tclosed_rb_num: %u\n",
-                                         r & 0x0FFF);
+                                         "\tclosed_rb_num: %u\n", r);
                 } else {
                         pos += scnprintf(buf + pos, bufsz - pos,
                                          "\tclosed_rb_num: Not Allocated\n");
@@ -3089,7 +3107,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
         spin_lock(&rxq->lock);
 
-        r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+        r = iwl_get_closed_rb_stts(trans, rxq);
 
         for (i = rxq->read, j = 0;
              i != r && j < allocated_rb_nums;
@@ -3385,9 +3403,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
                 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
                 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
                 /* RBs */
-                num_rbs =
-                        le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
-                        & 0x0FFF;
+                num_rbs = iwl_get_closed_rb_stts(trans, rxq);
                 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
                 len += num_rbs * (sizeof(*data) +
                                   sizeof(struct iwl_fw_error_dump_rb) +
@@ -3597,10 +3613,19 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         int ret, addr_size;
         const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
         void __iomem * const *table;
+        u32 bar0;
 
         if (!cfg_trans->gen2)
                 ops = &trans_ops_pcie;
 
+        /* reassign our BAR 0 if invalid due to possible runtime PM races */
+        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
+        if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+                ret = pci_assign_resource(pdev, 0);
+                if (ret)
+                        return ERR_PTR(ret);
+        }
+
         ret = pcim_enable_device(pdev);
         if (ret)
                 return ERR_PTR(ret);
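
A note on the iwl_get_closed_rb_stts() change that the rx.c and trans.c hunks above build on: the helper now returns a plain CPU-order u16 with the 12-bit closed-RB index already masked, so each caller drops its own le16_to_cpu(...) & 0x0FFF step. The short userspace sketch below models that refactor only; the le16 typedef, le16_to_cpu_model() helper and sample value are illustrative stand-ins (not driver code), and a little-endian host is assumed.

/* Toy model of moving endian conversion and masking into the accessor,
 * as the patch does for iwl_get_closed_rb_stts(). Userspace only. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t le16;                  /* stand-in for the kernel's __le16 */

static uint16_t le16_to_cpu_model(le16 v)
{
        return v;                       /* assumes a little-endian host */
}

/* old style: raw little-endian value, caller converts and masks */
static le16 get_closed_rb_stts_old(const le16 *rb_stts)
{
        return *rb_stts;
}

/* new style: accessor returns a ready-to-use 12-bit index */
static uint16_t get_closed_rb_stts_new(const le16 *rb_stts)
{
        return le16_to_cpu_model(*rb_stts) & 0xFFF;
}

int main(void)
{
        le16 rb_stts = 0xF123;          /* upper nibble modeled as don't-care bits */

        uint16_t r_old = le16_to_cpu_model(get_closed_rb_stts_old(&rb_stts)) & 0x0FFF;
        uint16_t r_new = get_closed_rb_stts_new(&rb_stts);

        printf("old caller: %u, new caller: %u\n", (unsigned)r_old, (unsigned)r_new);
        return 0;
}

Centralizing the conversion in the accessor keeps the masking rule in one place, which is why the debugfs, error-dump and Rx-handling paths in this diff all shrink to a single call.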