author | Jiri Kosina <jkosina@suse.cz> | 2017-05-02 11:02:41 +0200
---|---|---
committer | Jiri Kosina <jkosina@suse.cz> | 2017-05-02 11:02:41 +0200
commit | 4d6ca227c768b50b05cf183974b40abe444e9d0c (patch) |
tree | bf953d8e895281053548b9967a2c4b58d641df00 /drivers/net/wireless/intel/iwlwifi/mvm |
parent | 800f3eef8ebc1264e9c135bfa892c8ae41fa4792 (diff) |
parent | af22a610bc38508d5ea760507d31be6b6983dfa8 (diff) |
Merge branch 'for-4.12/asus' into for-linus
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
22 files changed, 1162 insertions, 749 deletions
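One recurring pattern in the diff below: several notification handlers (e.g. iwl_alive_fn(), the MFUART notification, and the statistics handler) select the notification layout by comparing the received payload length against the size of the newer struct. A minimal standalone sketch of that idea, with made-up struct layouts and names (not the driver's actual API), might look like:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical v3/v4 notification layouts, for illustration only. */
struct alive_v3 { uint16_t status; uint16_t flags; uint32_t lmac_data[4]; };
struct alive_v4 { uint16_t status; uint16_t flags; uint32_t lmac_data[2][4]; };

/* Dispatch on payload length, mirroring the sizeof(*palive) check in the diff. */
static void handle_alive(const void *payload, size_t len)
{
	if (len == sizeof(struct alive_v4))
		puts("v4 alive: two LMACs (CDB)");
	else if (len == sizeof(struct alive_v3))
		puts("v3 alive: single LMAC");
	else
		puts("unexpected alive payload size");
	(void)payload; /* a real handler would parse the fields here */
}

int main(void)
{
	struct alive_v4 sample = { .status = 1 };
	handle_alive(&sample, sizeof(sample));
	return 0;
}
```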
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b88e2048ae0b..c7eb1983c4f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); mvmvif->rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; mutex_unlock(&mvm->mutex); @@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - if (mvm->restart_fw > 0) { - mvm->restart_fw--; - ieee80211_restart_hw(mvm->hw); - } iwl_mvm_free_nd(mvm); + + if (!unified_image) { + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + if (mvm->restart_fw > 0) { + mvm->restart_fw--; + ieee80211_restart_hw(mvm->hw); + } + } } out_noreset: mutex_unlock(&mvm->mutex); @@ -1738,7 +1741,7 @@ out: static struct iwl_wowlan_status * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 base = mvm->error_event_table; + u32 base = mvm->error_event_table[0]; struct error_table_start { /* cf. struct iwl_error_event_table */ u32 valid; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 7b7d2a146e30..a260cd503200 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { - int ret; + int __maybe_unused ret; mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index 0246506ab595..480a54af4534 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -64,13 +64,14 @@ #define __fw_api_mac_h__ /* - * The first MAC indices (starting from 0) - * are available to the driver, AUX follows + * The first MAC indices (starting from 0) are available to the driver, + * AUX indices follows - 1 for non-CDB, 2 for CDB. 
*/ #define MAC_INDEX_AUX 4 #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX -#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) +#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) +#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) #define IWL_MVM_STATION_COUNT 16 #define IWL_MVM_TDLS_STA_COUNT 4 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h index 0c294c9f98e9..c78a0c499459 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -453,6 +453,8 @@ enum scan_config_flags { SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), + SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), /* Bits 26-31 are for num of channels in channel_array */ #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) @@ -486,6 +488,20 @@ enum iwl_channel_flags { }; /** + * struct iwl_scan_dwell + * @active: default dwell time for active scan + * @passive: default dwell time for passive scan + * @fragmented: default dwell time for fragmented scan + * @extended: default dwell time for channels 1, 6 and 11 + */ +struct iwl_scan_dwell { + u8 active; + u8 passive; + u8 fragmented; + u8 extended; +} __packed; + +/** * struct iwl_scan_config * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions @@ -493,10 +509,7 @@ enum iwl_channel_flags { * @legacy_rates: default legacy rates - enum scan_config_rates * @out_of_channel_time: default max out of serving channel time * @suspend_time: default max suspend time - * @dwell_active: default dwell time for active scan - * @dwell_passive: default dwell time for passive scan - * @dwell_fragmented: default dwell time for fragmented scan - * @dwell_extended: default dwell time for channels 1, 6 and 11 + * @dwell: dwells for the scan * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags @@ -510,16 +523,29 @@ struct iwl_scan_config { __le32 legacy_rates; __le32 out_of_channel_time; __le32 suspend_time; - u8 dwell_active; - u8 dwell_passive; - u8 dwell_fragmented; - u8 dwell_extended; + struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ +#define SCAN_TWO_LMACS 2 + +struct iwl_scan_config_cdb { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + struct iwl_scan_dwell dwell; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ + /** * iwl_umac_scan_flags *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request @@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets { }; enum iwl_umac_scan_general_flags { - IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), - IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), - IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), - IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), - IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), - IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), - IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), - IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), - IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), - IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), - 
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), }; /** @@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail { * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan - * @max_out_time: max out of serving channel time - * @suspend_time: max suspend time + * @max_out_time: max out of serving channel time, per LMAC - for CDB there + * are 2 LMACs + * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs * @scan_priority: scan internal prioritization &enum iwl_scan_priority * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request @@ -631,15 +659,33 @@ struct iwl_scan_req_umac { u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; - __le32 max_out_time; - __le32 suspend_time; - __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved; - u8 data[]; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + union { + struct { + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + struct { + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */ + }; +} __packed; + +#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(__le32)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h index 4e638a44babb..6371c342b96d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h @@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity { __le32 lo_priority_rx_denied_cnt; } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ -struct mvm_statistics_general_v8 { +struct mvm_statistics_general_common { __le32 radio_temperature; __le32 radio_voltage; struct mvm_statistics_dbg dbg; @@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 { __le64 on_time_rf; __le64 on_time_scan; __le64 tx_time; +} __packed; + +struct mvm_statistics_general_v8 { + struct mvm_statistics_general_common common; __le32 beacon_counter[NUM_MAC_INDEX]; u8 beacon_average_energy[NUM_MAC_INDEX]; u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ +struct mvm_statistics_general_cdb { + struct mvm_statistics_general_common common; + __le32 beacon_counter[NUM_MAC_INDEX_CDB]; + u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; + u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ + /** 
* struct mvm_statistics_load - RX statistics for multi-queue devices * @air_time: accumulated air time, per mac @@ -267,6 +278,13 @@ struct mvm_statistics_load { u8 avg_energy[IWL_MVM_STATION_COUNT]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ +struct mvm_statistics_load_cdb { + __le32 air_time[NUM_MAC_INDEX_CDB]; + __le32 byte_count[NUM_MAC_INDEX_CDB]; + __le32 pkt_count[NUM_MAC_INDEX_CDB]; + u8 avg_energy[IWL_MVM_STATION_COUNT]; +} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */ + struct mvm_statistics_rx { struct mvm_statistics_rx_phy ofdm; struct mvm_statistics_rx_phy cck; @@ -281,6 +299,7 @@ struct mvm_statistics_rx { * while associated. To disable this behavior, set DISABLE_NOTIF flag in the * STATISTICS_CMD (0x9c), below. */ + struct iwl_notif_statistics_v10 { __le32 flag; struct mvm_statistics_rx rx; @@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 { struct mvm_statistics_load load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_11 */ +struct iwl_notif_statistics_cdb { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_cdb general; + struct mvm_statistics_load_cdb load_stats; +} __packed; /* STATISTICS_NTFY_API_S_VER_12 */ + #define IWL_STATISTICS_FLG_CLEAR 0x1 #define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 59ca97a11b2b..b38cc073adcc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 { } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** - * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA - * @tx: the tx commands associated with the beacon frame + * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA * @template_id: currently equal to the mac context id of the coresponding * mac. 
* @tim_idx: the offset of the tim IE in the beacon @@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 { * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ -struct iwl_mac_beacon_cmd { - struct iwl_tx_cmd tx; +struct iwl_mac_beacon_cmd_data { __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; +}; + +/** + * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd tx; + struct iwl_mac_beacon_cmd_data data; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ +/** + * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA + * @byte_cnt: byte count of the beacon frame + * @flags: for future use + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd { + __le16 byte_cnt; + __le16 flags; + __le64 reserved; + struct iwl_mac_beacon_cmd_data data; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ + struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index ae12badc0c2a..cf2b836f3888 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids { STORED_BEACON_NTF = 0xFF, }; +enum iwl_regulatory_and_nvm_subcmd_ids { + NVM_ACCESS_COMPLETE = 0x0, +}; + enum iwl_fmac_debug_cmds { LMAC_RD_WR = 0x0, UMAC_RD_WR = 0x1, @@ -355,6 +359,7 @@ enum { PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, + REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, }; @@ -593,60 +598,7 @@ enum { #define IWL_ALIVE_FLG_RFKILL BIT(0) -struct mvm_alive_resp_ver1 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ -} __packed; /* ALIVE_RES_API_S_VER_1 */ - -struct mvm_alive_resp_ver2 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; - u8 umac_minor; /* UMAC version: minor */ - u8 umac_major; /* UMAC version: major */ - __le16 umac_id; /* UMAC version: id */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* ALIVE_RES_API_S_VER_2 */ - -struct mvm_alive_resp { - __le16 status; - __le16 flags; +struct iwl_lmac_alive { __le32 ucode_minor; __le32 ucode_major; u8 ver_subtype; @@ -662,12 +614,29 @@ struct mvm_alive_resp { __le32 scd_base_ptr; /* SRAM address for SCD */ __le32 st_fwrd_addr; /* pointer to 
Store and forward */ __le32 st_fwrd_size; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_umac_alive { __le32 umac_minor; /* UMAC version: minor */ __le32 umac_major; /* UMAC version: major */ __le32 error_info_addr; /* SRAM address for UMAC error log */ __le32 dbg_print_buff_addr; +} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ + +struct mvm_alive_resp_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; } __packed; /* ALIVE_RES_API_S_VER_3 */ +struct mvm_alive_resp { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_4 */ + /* Error response/notification */ enum { FW_ERR_UNKNOWN_CMD = 0x0, @@ -708,7 +677,6 @@ struct iwl_error_resp { #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) #define AUX_BINDING_INDEX (3) -#define MAX_PHYS (4) /* Used to extract ID and color from the context dword */ #define FW_CTXT_ID_POS (0) @@ -1251,13 +1219,16 @@ struct iwl_missed_beacons_notif { * @external_ver: external image version * @status: MFUART loading status * @duration: MFUART loading time + * @image_size: MFUART image size in bytes */ struct iwl_mfuart_load_notif { __le32 installed_ver; __le32 external_ver; __le32 status; __le32 duration; -} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/ + /* image size valid only in v2 of the command */ + __le32 image_size; +} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ /** * struct iwl_set_calib_default_cmd - set default value for calibration. @@ -2075,7 +2046,7 @@ struct iwl_mu_group_mgmt_notif { * @system_time: system time on air rise * @tsf: TSF on air rise * @beacon_timestamp: beacon on air rise - * @phy_flags: general phy flags: band, modulation, etc. + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count @@ -2084,12 +2055,12 @@ struct iwl_stored_beacon_notif { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; - __le16 phy_flags; + __le16 band; __le16 channel; __le32 rates; __le32 byte_count; u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ #define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 @@ -2200,4 +2171,11 @@ struct iwl_dbg_mem_access_rsp { __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ +/** + * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed + */ +struct iwl_nvm_access_complete_cmd { + __le32 reserved; +} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 2e8e3e8e30a3..a027b11bbdb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -406,46 +406,63 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; -static u32 iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) +static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + u32 i; + + for (i = 0; i < len_bytes; i += 4) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); +} + +static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 
*data) +{ + unsigned long flags; + bool success = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + success = true; + _iwl_read_prph_block(trans, start, len_bytes, data); + iwl_trans_release_nic_access(trans, &flags); + } + + return success; +} + +static void iwl_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) { struct iwl_fw_error_dump_prph *prph; unsigned long flags; - u32 prph_len = 0, i; + u32 i; if (!iwl_trans_grab_nic_access(trans, &flags)) - return 0; + return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; - int reg; - __le32 *val; - - prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - val = (void *)prph->data; - for (reg = iwl_prph_dump_addr[i].start; - reg <= iwl_prph_dump_addr[i].end; - reg += 4) - *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans, - reg)); + _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, + /* our range is inclusive, hence + 4 */ + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4, + (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans, &flags); - - return prph_len; } /* @@ -495,11 +512,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_mvm_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem = - mvm->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len; + u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; + u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 
0 : mvm->cfg->dccm2_len; bool monitor_dump_only = false; int i; @@ -624,10 +640,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for MEM segments */ - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i]->len); + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); } /* Make room for fw's virtual image pages, if it exists */ @@ -656,7 +671,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; - if (!mvm->fw->dbg_dynamic_mem) + if (!mvm->fw->n_dbg_mem_tlv) file_len += sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); @@ -708,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - if (!mvm->fw->dbg_dynamic_mem) { + if (!mvm->fw->n_dbg_mem_tlv) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -719,22 +734,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data = iwl_fw_error_next_data(dump_data); } - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) { - u32 len = le32_to_cpu(fw_dbg_mem[i]->len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs); - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(len + - sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = fw_dbg_mem[i]->data_type; - dump_mem->offset = cpu_to_le32(ofs); + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + bool success; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i].data_type; + dump_mem->offset = cpu_to_le32(ofs); + + switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { + case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): iwl_trans_read_mem_bytes(mvm->trans, ofs, dump_mem->data, len); - dump_data = iwl_fw_error_next_data(dump_data); + success = true; + break; + case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): + success = iwl_read_prph_block(mvm->trans, ofs, len, + (void *)dump_mem->data); + break; + default: + /* + * shouldn't get here, we ignored this kind + * of TLV earlier during the TLV parsing?! 
+ */ + WARN_ON(1); + success = false; } + + if (success) + dump_data = iwl_fw_error_next_data(dump_data); } if (smem_len) { @@ -779,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_fw_error_dump_paging *paging; struct page *pages = mvm->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)dump_data->data; paging->index = cpu_to_le32(i); + dma_sync_single_for_cpu(mvm->trans->dev, addr, + PAGING_BLOCK_SIZE, + DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dump_data = iwl_fw_error_next_data(dump_data); @@ -816,11 +852,12 @@ dump_trans_data: sg_nents(sg_dump_data), fw_error_dump->op_mode_ptr, fw_error_dump->op_mode_len, 0); - sg_pcopy_from_buffer(sg_dump_data, - sg_nents(sg_dump_data), - fw_error_dump->trans_ptr->data, - fw_error_dump->trans_ptr->len, - fw_error_dump->op_mode_len); + if (fw_error_dump->trans_ptr) + sg_pcopy_from_buffer(sg_dump_data, + sg_nents(sg_dump_data), + fw_error_dump->trans_ptr->data, + fw_error_dump->trans_ptr->len, + fw_error_dump->op_mode_len); dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 872066317fa5..45cb4f476e76 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * CPU2 paging CSS * CPU2 paging image (including instruction and data) */ - for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { + for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { sec_idx++; break; @@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * If paging is enabled there should be at least 2 more sections left * (one for CSS and one for Paging data) */ - if (sec_idx >= ARRAY_SIZE(image->sec) - 1) { + if (sec_idx >= image->num_sec - 1) { IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); iwl_free_fw_paging(mvm); return -EINVAL; @@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, mvm->fw_paging_db[0].fw_paging_size); + dma_sync_single_for_device(mvm->trans->dev, + mvm->fw_paging_db[0].fw_paging_phys, + mvm->fw_paging_db[0].fw_paging_size, + DMA_BIDIRECTIONAL); IWL_DEBUG_FW(mvm, "Paging: copied %d CSS bytes to first block\n", @@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) * loop stop at num_of_paging_blk since that last block is not full. 
*/ for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, - mvm->fw_paging_db[idx].fw_paging_size); + block->fw_paging_size); + dma_sync_single_for_device(mvm->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + IWL_DEBUG_FW(mvm, "Paging: copied %d paging bytes to block %d\n", @@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) /* copy the last paging block */ if (mvm->num_of_pages_in_last_blk > 0) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); + dma_sync_single_for_device(mvm->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); IWL_DEBUG_FW(mvm, "Paging: copied %d pages in the last block %d\n", @@ -259,9 +276,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, { struct page *block; dma_addr_t phys = 0; - int blk_idx = 0; - int order, num_of_pages; - int dma_enabled; + int blk_idx, order, num_of_pages, size, dma_enabled; if (mvm->fw_paging_db[0].fw_paging_block) return 0; @@ -272,9 +287,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = ((num_of_pages - 1) / - NUM_OF_PAGE_PER_GROUP) + 1; - + mvm->num_of_paging_blk = + DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); mvm->num_of_pages_in_last_blk = num_of_pages - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); @@ -284,46 +298,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, mvm->num_of_paging_blk, mvm->num_of_pages_in_last_blk); - /* allocate block of 4Kbytes for paging CSS */ - order = get_order(FW_PAGING_SIZE); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one since - * we failed to map_page. - */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - /* - * allocate blocks in dram. - * since that CSS allocated in fw_paging_db[0] loop start from index 1 + * Allocate CSS and paging blocks in dram. */ - for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* allocate block of PAGING_BLOCK_SIZE (32K) */ - order = get_order(PAGING_BLOCK_SIZE); + for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ + size = blk_idx ? 
PAGING_BLOCK_SIZE : FW_PAGING_SIZE; + order = get_order(size); block = alloc_pages(GFP_KERNEL, order); if (!block) { /* free all the previous pages since we failed */ @@ -332,7 +313,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, } mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; + mvm->fw_paging_db[blk_idx].fw_paging_size = size; if (dma_enabled) { phys = dma_map_page(mvm->trans->dev, block, 0, @@ -353,9 +334,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, blk_idx << BLOCK_2_EXP_SIZE; } - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); + if (!blk_idx) + IWL_DEBUG_FW(mvm, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + else + IWL_DEBUG_FW(mvm, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); } return 0; @@ -475,80 +461,60 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; - struct mvm_alive_resp_ver1 *palive1; - struct mvm_alive_resp_ver2 *palive2; + struct mvm_alive_resp_v3 *palive3; struct mvm_alive_resp *palive; + struct iwl_umac_alive *umac; + struct iwl_lmac_alive *lmac1; + struct iwl_lmac_alive *lmac2 = NULL; + u16 status; + + if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { + palive = (void *)pkt->data; + umac = &palive->umac_data; + lmac1 = &palive->lmac_data[0]; + lmac2 = &palive->lmac_data[1]; + status = le16_to_cpu(palive->status); + } else { + palive3 = (void *)pkt->data; + umac = &palive3->umac_data; + lmac1 = &palive3->lmac_data; + status = le16_to_cpu(palive3->status); + } - if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) { - palive1 = (void *)pkt->data; + mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr); + if (lmac2) + mvm->error_event_table[1] = + le32_to_cpu(lmac2->error_event_table_ptr); + mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr); + mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr); + mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size); - mvm->support_umac_log = false; - mvm->error_event_table = - le32_to_cpu(palive1->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive1->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr); + mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr); - alive_data->valid = le16_to_cpu(palive1->status) == - IWL_ALIVE_STATUS_OK; - IWL_DEBUG_FW(mvm, - "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive1->status), palive1->ver_type, - palive1->ver_subtype, palive1->flags); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) { - palive2 = (void *)pkt->data; - - mvm->error_event_table = - le32_to_cpu(palive2->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive2->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive2->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive2->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; + alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr); + alive_data->valid = status == IWL_ALIVE_STATUS_OK; + if 
(mvm->umac_error_event_table) + mvm->support_umac_log = true; - IWL_DEBUG_FW(mvm, - "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive2->status), palive2->ver_type, - palive2->ver_subtype, palive2->flags); + IWL_DEBUG_FW(mvm, + "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", + status, lmac1->ver_type, lmac1->ver_subtype); - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - palive2->umac_major, palive2->umac_minor); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { - palive = (void *)pkt->data; + if (lmac2) + IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); - mvm->error_event_table = - le32_to_cpu(palive->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; + IWL_DEBUG_FW(mvm, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + le32_to_cpu(umac->umac_major), + le32_to_cpu(umac->umac_minor)); - IWL_DEBUG_FW(mvm, - "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive->status), palive->ver_type, - palive->ver_subtype, palive->flags); + return true; +} - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - le32_to_cpu(palive->umac_major), - le32_to_cpu(palive->umac_minor)); - } +static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } @@ -568,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } +static int iwl_mvm_init_paging(struct iwl_mvm *mvm) +{ + const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode]; + int ret; + + /* + * Configure and operate fw paging mechanism. + * The driver configures the paging flow only once. + * The CPU2 paging image is included in the IWL_UCODE_INIT image. + */ + if (!fw->paging_mem_size) + return 0; + + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place were the pages are + * stored. + */ + if (!is_device_dma_capable(mvm->trans->dev)) { + ret = iwl_trans_get_paging_item(mvm); + if (ret) { + IWL_ERR(mvm, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to send the paging cmd\n"); + iwl_free_fw_paging(mvm); + return ret; + } + + return 0; +} static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -639,40 +647,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); /* - * configure and operate fw paging mechanism. - * driver configures the paging flow only once, CPU2 paging image - * included in the IWL_UCODE_INIT image. - */ - if (fw->paging_mem_size) { - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. 
- * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - } - - /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they * could be stopped, so wake them up. In firmware restart, @@ -829,6 +803,75 @@ out: return ret; } +int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait init_wait; + struct iwl_nvm_access_complete_cmd nvm_complete = {}; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, + &init_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_init_complete, + NULL); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + /* TODO: remove when integrating context info */ + ret = iwl_mvm_init_paging(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to init paging: %d\n", + ret); + goto error; + } + + /* Read the NVM only at driver load time, no need to do this twice */ + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm, true); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + /* In case we read the NVM from external file, load it to the NIC */ + if (mvm->nvm_file_name) + iwl_mvm_load_nvm_to_nic(mvm); + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + if (WARN_ON(ret)) + goto error; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, + NVM_ACCESS_COMPLETE), 0, + sizeof(nvm_complete), &nvm_complete); + if (ret) { + IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", + ret); + goto error; + } + + /* We wait for the INIT complete notification */ + return iwl_wait_notification(&mvm->notif_wait, &init_wait, + MVM_UCODE_ALIVE_TIMEOUT); + +error: + iwl_remove_notification(&mvm->notif_wait, &init_wait); + return ret; +} + static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1089,23 +1132,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_up(struct iwl_mvm *mvm) +static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { - int ret, i; - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; - - lockdep_assert_held(&mvm->mutex); + int ret; - ret = iwl_trans_start_hw(mvm->trans); - if (ret) - return ret; + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_run_unified_mvm_ucode(mvm, false); - /* - * If we haven't completed the run of the init ucode during - * module loading, load init ucode now - * (for example, if we were in RFKILL) - */ ret = iwl_run_init_mvm_ucode(mvm, false); if (iwlmvm_mod_params.init_dbg) @@ -1116,7 +1149,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* this can't happen */ if (WARN_ON(ret > 0)) ret = -ERFKILL; - goto error; + return ret; } /* @@ -1127,9 +1160,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm) _iwl_trans_stop_device(mvm->trans, false); ret = _iwl_trans_start_hw(mvm->trans, false); if (ret) - goto error; + 
return ret; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) + return ret; + + return iwl_mvm_init_paging(mvm); +} + +int iwl_mvm_up(struct iwl_mvm *mvm) +{ + int ret, i; + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; @@ -1156,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Send phy db control command and then phy db calibration*/ - ret = iwl_send_phy_db_data(mvm->phy_db); - if (ret) - goto error; + if (!iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + } /* Init RSS configuration */ if (iwl_mvm_has_new_rx_api(mvm)) { @@ -1348,4 +1402,9 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); + + if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) + IWL_DEBUG_INFO(mvm, + "MFUART: image size: 0x%08x\n", + le32_to_cpu(mfuart_notif->image_size)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 4a0874e40731..99132ea16ede 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + /* + * If DQA is supported - queues were already disabled, since in + * DQA-mode the queues are a property of the STA and not of the + * vif, and at this point the STA was already deleted + */ + if (iwl_mvm_is_dqa_supported(mvm)) + return; + switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - if (!iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MAX_TID_COUNT, 0); - else - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_P2P_DEVICE_QUEUE, - vif->hw_queue[0], IWL_MAX_TID_COUNT, - 0); + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MAX_TID_COUNT, 0); break; case NL80211_IFTYPE_AP: iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); - - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, - vif->hw_queue[0], IWL_MAX_TID_COUNT, - 0); /* fall through */ default: - /* - * If DQA is supported - queues were already disabled, since in - * DQA-mode the queues are a property of the STA and not of the - * vif, and at this point the STA was already deleted - */ - if (iwl_mvm_is_dqa_supported(mvm)) - break; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], @@ -991,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, } static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, - struct iwl_mac_beacon_cmd_v6 *beacon_cmd, + __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size) { u32 tim_idx; @@ -1008,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, /* If TIM field was found, set variables */ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - beacon_cmd->tim_idx = cpu_to_le32(tim_idx); - beacon_cmd->tim_size 
= cpu_to_le32((u32)beacon[tim_idx+1]); + *tim_index = cpu_to_le32(tim_idx); + *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]); } else { IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); } @@ -1043,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, }; union { struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; - struct iwl_mac_beacon_cmd beacon_cmd; + struct iwl_mac_beacon_cmd_v7 beacon_cmd; } u = {}; + struct iwl_mac_beacon_cmd beacon_cmd; struct ieee80211_tx_info *info; u32 beacon_skb_len; u32 rate, tx_flags; @@ -1054,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, beacon_skb_len = beacon->len; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { + u32 csa_offset, ecsa_offset; + + csa_offset = iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon_skb_len); + ecsa_offset = + iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon_skb_len); + + if (iwl_mvm_has_new_tx_api(mvm)) { + beacon_cmd.data.template_id = + cpu_to_le32((u32)mvmvif->id); + beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset); + beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len); + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, + &beacon_cmd.data.tim_idx, + &beacon_cmd.data.tim_size, + beacon->data, + beacon_skb_len); + cmd.len[0] = sizeof(beacon_cmd); + cmd.data[0] = &beacon_cmd; + goto send; + + } else { + u.beacon_cmd.data.ecsa_offset = + cpu_to_le32(ecsa_offset); + u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); + cmd.len[0] = sizeof(u.beacon_cmd); + cmd.data[0] = &u; + } + } else { + cmd.len[0] = sizeof(u.beacon_cmd_v6); + cmd.data[0] = &u; + } + /* TODO: for now the beacon template id is set to be the mac context id. * Might be better to handle it as another resource ... */ u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); @@ -1092,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, /* Set up TX beacon command fields */ if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6, + iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx, + &u.beacon_cmd_v6.tim_size, beacon->data, beacon_skb_len); +send: /* Submit command */ - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { - u.beacon_cmd.csa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon_skb_len)); - u.beacon_cmd.ecsa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon_skb_len)); - - cmd.len[0] = sizeof(u.beacon_cmd); - } else { - cmd.len[0] = sizeof(u.beacon_cmd_v6); - } - - cmd.data[0] = &u; cmd.dataflags[0] = 0; cmd.len[1] = beacon_skb_len; cmd.data[1] = beacon->data; @@ -1565,7 +1578,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = - (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 45122dafe922..d37b1695c64e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; + /* this is the case for CCK frames, it's better (only 8) for OFDM */ + hw->radiotap_timestamp.accuracy = 22; + hw->rate_control_algorithm = "iwl-mvm-rs"; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; @@ -670,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->wowlan = &mvm->wowlan; } - if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { @@ -1203,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* the fw is stopped, the aux sta is dead: clean up driver state */ iwl_mvm_del_aux_sta(mvm); - iwl_free_fw_paging(mvm); - /* * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() * won't be called in this case). @@ -2003,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); - } else if (changes & BSS_CHANGED_BEACON_INFO) { + } + + if (changes & BSS_CHANGED_BEACON_INFO) { /* - * We received a beacon _after_ association so + * We received a beacon from the associated AP so * remove the session protection. 
*/ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - } - if (changes & BSS_CHANGED_BEACON_INFO) { iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } @@ -2099,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_unbind; - /* enable the multicast queue, now that we have a station for it */ - if (iwl_mvm_is_dqa_supported(mvm)) { - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, wdg_timeout); - } - /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2547,6 +2536,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", @@ -2575,8 +2565,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST && iwl_mvm_is_dqa_supported(mvm)) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); @@ -2587,6 +2575,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } mutex_lock(&mvm->mutex); + /* track whether or not the station is associated */ + mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; + if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { /* @@ -2636,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } + + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); ret = iwl_mvm_update_sta(mvm, vif, sta); - if (ret == 0) - iwl_mvm_rs_rate_init(mvm, sta, - mvmvif->phy_ctxt->channel->band, - true); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4a9cb76b7611..73a216524af2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -739,8 +739,9 @@ struct iwl_mvm { enum iwl_ucode_type cur_ucode; bool ucode_loaded; + bool hw_registered; bool calibrating; - u32 error_event_table; + u32 error_event_table[2]; u32 log_event_table; u32 umac_error_event_table; bool support_umac_log; @@ -1217,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) return mvm->trans->cfg->use_tfh; } +static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) +{ + /* + * TODO: + * The issue of how to determine CDB support is still not well defined. + * It may be that it will be for all next HW devices and it may be per + * FW compilation and it may also differ between different devices. + * For now take a ride on the new TX API and get back to it when + * it is well defined. 
+ */ + return iwl_mvm_has_new_tx_api(mvm); +} + static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { #ifdef CONFIG_THERMAL @@ -1257,6 +1271,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); +int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, @@ -1657,8 +1672,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, * Disable a TXQ. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. */ -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags); +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); /* Return a bitmask with all the hw supported queues, except for the @@ -1686,6 +1701,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { + iwl_free_fw_paging(mvm); mvm->ucode_loaded = false; iwl_trans_stop_device(mvm->trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f14aada390c5..4cd72d4cdc47 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { + HCMD_NAME(NVM_ACCESS_COMPLETE), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), @@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), + [REGULATORY_AND_NVM_GROUP] = + HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), }; /* this forward declaration can avoid to export the function */ @@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } mvm->sf_state = SF_UNINIT; - mvm->cur_ucode = IWL_UCODE_INIT; + if (iwl_mvm_has_new_tx_api(mvm)) + mvm->cur_ucode = IWL_UCODE_REGULAR; + else + mvm->cur_ucode = IWL_UCODE_INIT; mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); @@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); - err = iwl_run_init_mvm_ucode(mvm, true); + if (iwl_mvm_has_new_tx_api(mvm)) + err = iwl_run_unified_mvm_ucode(mvm, true); + else + err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, err = iwl_mvm_mac_setup_register(mvm); if (err) goto out_free; + mvm->hw_registered = true; min_backoff = calc_min_backoff(trans, cfg); iwl_mvm_thermal_initialize(mvm, min_backoff); @@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_unregister: 
ieee80211_unregister_hw(mvm->hw); + mvm->hw_registered = false; iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: @@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + mvm->hw_registered) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index af6d10c23e5a..e684811f8e8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac; bool tid_found = false; +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (mvmvif->dbgfs_pm.use_ps_poll) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); + return; + } +#endif + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { if (!mvmvif->queue_params[ac].uapsd) continue; @@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, } } - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* set advanced pm flag with no uapsd ACs to enable ps-poll */ - if (mvmvif->dbgfs_pm.use_ps_poll) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); -#endif - return; - } - cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | @@ -601,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *disable_ps = _data; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - *disable_ps |= mvmvif->ps_disabled; + if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) + *disable_ps |= mvmvif->ps_disabled; } static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, @@ -611,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_power_vifs *power_iterator = _data; + bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: @@ -621,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, /* only a single MAC of the same type */ WARN_ON(power_iterator->ap_vif); power_iterator->ap_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->ap_active = true; + if (active) + power_iterator->ap_active = true; break; case NL80211_IFTYPE_MONITOR: /* only a single MAC of the same type */ WARN_ON(power_iterator->monitor_vif); power_iterator->monitor_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->monitor_active = true; + if (active) + power_iterator->monitor_active = true; break; case NL80211_IFTYPE_P2P_CLIENT: /* only a single MAC of the same type */ WARN_ON(power_iterator->p2p_vif); power_iterator->p2p_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->p2p_active = true; + if (active) + power_iterator->p2p_active = true; break; case NL80211_IFTYPE_STATION: power_iterator->bss_vif = vif; - if 
(mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->bss_active = true; + if (active) + power_iterator->bss_active = true; break; default: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 227c5ed9cbe6..ce907c58ebf6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_vif *mvmvif; - if (!sta->ht_cap.ht_supported) return false; @@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (mvm->nvm_data->sku_cap_mimo_disabled) return false; @@ -978,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, /* Find the previous rate that is in the rate mask */ i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (i >= 0) + mask = BIT(i); + for (; i >= 0; i--, mask >>= 1) { if (rate_mask & mask) { low = i; break; @@ -3071,7 +3067,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) { - u8 nss = 0, mcs = 0; + u8 nss = 0; spin_lock(&mvm->drv_stats_lock); @@ -3099,11 +3095,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) if (rate & RATE_MCS_HT_MSK) { mvm->drv_rx_stats.ht_frames++; - mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_VHT_MSK) { mvm->drv_rx_stats.vht_frames++; - mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else { @@ -3624,6 +3618,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate) } else if (rate & RATE_MCS_HT_MSK) { type = "HT"; mcs = rate & RATE_HT_MCS_INDEX_MSK; + nss = ((rate & RATE_HT_MCS_NSS_MSK) + >> RATE_HT_MCS_NSS_POS) + 1; } else { type = "Unknown"; /* shouldn't happen */ } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0e60e38b2acf..20473df79c94 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -497,8 +497,7 @@ struct iwl_mvm_stat_data { struct iwl_mvm *mvm; __le32 mac_id; u8 beacon_filter_average_energy; - struct mvm_statistics_general_v8 *general; - struct mvm_statistics_load *load; + void *general; }; static void iwl_mvm_stat_iterator(void *_data, u8 *mac, @@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * the notification directly. 
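
/*
 * Illustrative sketch only: the rs.c hunk above guards the initial
 * "mask = BIT(i)" so the shift never runs with a negative i (searching below
 * index 0), which would be undefined behaviour in C. The helper below is a
 * standalone analogue of that "previous set bit" scan, not the driver function.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define RATE_INVALID	0xff

/* Return the highest set bit strictly below @index, or RATE_INVALID. */
static unsigned int prev_rate_in_mask(unsigned int rate_mask, int index)
{
	unsigned int mask = 0;
	int i = index - 1;

	if (i >= 0)		/* only shift when the index is valid */
		mask = BIT(i);
	for (; i >= 0; i--, mask >>= 1) {
		if (rate_mask & mask)
			return (unsigned int)i;
	}
	return RATE_INVALID;
}

int main(void)
{
	/* rates 1, 4 and 7 enabled */
	unsigned int rate_mask = BIT(1) | BIT(4) | BIT(7);

	printf("%u\n", prev_rate_in_mask(rate_mask, 7)); /* 4 */
	printf("%u\n", prev_rate_in_mask(rate_mask, 1)); /* 255: nothing below */
	printf("%u\n", prev_rate_in_mask(rate_mask, 0)); /* 255: i starts at -1, no shift */
	return 0;
}
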
*/ if (data->general) { - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(data->general->beacon_counter[mvmvif->id]); - mvmvif->beacon_stats.avg_signal = - -data->general->beacon_average_energy[mvmvif->id]; + u16 vif_id = mvmvif->id; + + if (iwl_mvm_is_cdb_supported(mvm)) { + struct mvm_statistics_general_cdb *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; + } else { + struct mvm_statistics_general_v8 *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; + } + } if (mvmvif->id != id) @@ -571,6 +586,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { @@ -580,6 +596,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + sig, GFP_KERNEL); } } @@ -615,48 +632,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data; + struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; struct iwl_mvm_stat_data data = { .mvm = mvm, }; - int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) : - sizeof(struct iwl_notif_statistics_v10); - u32 temperature; + int expected_size; + + if (iwl_mvm_is_cdb_supported(mvm)) + expected_size = sizeof(*stats); + else if (iwl_mvm_has_new_rx_api(mvm)) + expected_size = sizeof(struct iwl_notif_statistics_v11); + else + expected_size = sizeof(struct iwl_notif_statistics_v10); if (iwl_rx_packet_payload_len(pkt) != expected_size) goto invalid; - temperature = le32_to_cpu(stats->general.radio_temperature); data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = - stats->general.beacon_filter_average_energy; + stats->general.common.beacon_filter_average_energy; iwl_mvm_update_rx_statistics(mvm, &stats->rx); - mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); - mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time); + mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = - le64_to_cpu(stats->general.on_time_rf); + le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = - le64_to_cpu(stats->general.on_time_scan); + le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; if (iwl_mvm_has_new_rx_api(mvm)) { int i; - - data.load = &stats->load_stats; + u8 *energy; + __le32 *bytes, *air_time; + + if (!iwl_mvm_is_cdb_supported(mvm)) { + struct iwl_notif_statistics_v11 *v11 = + (void *)&pkt->data; + + energy = (void *)&v11->load_stats.avg_energy; + bytes = (void *)&v11->load_stats.byte_count; + air_time = (void *)&v11->load_stats.air_time; + } else { + energy = (void *)&stats->load_stats.avg_energy; + bytes = (void *)&stats->load_stats.byte_count; + air_time = (void *)&stats->load_stats.air_time; + } rcu_read_lock(); for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { struct iwl_mvm_sta *sta; - if (!data.load->avg_energy[i]) + if (!energy[i]) continue; sta = 
iwl_mvm_sta_from_staid_rcu(mvm, i); if (!sta) continue; - sta->avg_energy = data.load->avg_energy[i]; + sta->avg_energy = energy[i]; } rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 6c802cee900c..d79e9c2a2654 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { pad_len = 2; + + /* + * If the device inserted padding it means that (it thought) + * the 802.11 header wasn't a multiple of 4 bytes long. In + * this case, reserve two bytes at the start of the SKB to + * align the payload properly in case we end up copying it. + */ + skb_reserve(skb, pad_len); + } len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. @@ -409,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) - return; + goto set_timer; while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; @@ -432,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, } reorder_buf->head_sn = nssn; +set_timer: if (reorder_buf->num_stored && !reorder_buf->removed) { u16 index = reorder_buf->head_sn % reorder_buf->buf_size; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index fa9743205491..0a64efa844b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, int *global_cnt = data; if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < MAX_PHYS) + mvmvif->phy_ctxt->id < NUM_PHY_CTX) *global_cnt += 1; } @@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) return cpu_to_le32(rates); } -int iwl_mvm_config_scan(struct iwl_mvm *mvm) +static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, + struct iwl_scan_dwell *dwell, + struct iwl_mvm_scan_timing_params *timing) +{ + dwell->active = timing->dwell_active; + dwell->passive = timing->dwell_passive; + dwell->fragmented = timing->dwell_fragmented; + dwell->extended = timing->dwell_extended; +} + +static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) { - struct iwl_scan_config *scan_config; struct ieee80211_supported_band *band; - int num_channels = - mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; - int ret, i, j = 0, cmd_size; + int i, j = 0; + + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + channels[j] = band->channels[i].hw_value; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + channels[j] = band->channels[i].hw_value; +} + +static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) +{ + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + struct iwl_scan_config *cfg = config; + + cfg->flags = cpu_to_le32(flags); + cfg->tx_chains = 
cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); + cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); + + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + + memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + cfg->bcast_sta_id = mvm->aux_sta.sta_id; + cfg->channel_flags = channel_flags; + + iwl_mvm_fill_channels(mvm, cfg->channel_array); +} + +static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) +{ + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + struct iwl_scan_config_cdb *cfg = config; + + cfg->flags = cpu_to_le32(flags); + cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); + cfg->out_of_channel_time[0] = + cpu_to_le32(scan_timing[type].max_out_time); + cfg->out_of_channel_time[1] = + cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); + cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time); + + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + + memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + cfg->bcast_sta_id = mvm->aux_sta.sta_id; + cfg->channel_flags = channel_flags; + + iwl_mvm_fill_channels(mvm, cfg->channel_array); +} + +int iwl_mvm_config_scan(struct iwl_mvm *mvm) +{ + void *cfg; + int ret, cmd_size; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + int num_channels = + mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; + u32 flags; + u8 channel_flags; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; @@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return 0; } - cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; + if (iwl_mvm_is_cdb_supported(mvm)) + cmd_size = sizeof(struct iwl_scan_config_cdb); + else + cmd_size = sizeof(struct iwl_scan_config); + cmd_size += mvm->fw->ucode_capa.n_scan_channels; - scan_config = kzalloc(cmd_size, GFP_KERNEL); - if (!scan_config) + cfg = kzalloc(cmd_size, GFP_KERNEL); + if (!cfg) return -ENOMEM; - scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | - SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | - SCAN_CONFIG_FLAG_SET_TX_CHAINS | - SCAN_CONFIG_FLAG_SET_RX_CHAINS | - SCAN_CONFIG_FLAG_SET_AUX_STA_ID | - SCAN_CONFIG_FLAG_SET_ALL_TIMES | - SCAN_CONFIG_FLAG_SET_LEGACY_RATES | - SCAN_CONFIG_FLAG_SET_MAC_ADDR | - SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| - SCAN_CONFIG_N_CHANNELS(num_channels) | - (type == IWL_SCAN_TYPE_FRAGMENTED ? 
- SCAN_CONFIG_FLAG_SET_FRAGMENTED : - SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED)); - scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); - scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); - scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); - scan_config->out_of_channel_time = - cpu_to_le32(scan_timing[type].max_out_time); - scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); - scan_config->dwell_active = scan_timing[type].dwell_active; - scan_config->dwell_passive = scan_timing[type].dwell_passive; - scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented; - scan_config->dwell_extended = scan_timing[type].dwell_extended; - - memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); - - scan_config->bcast_sta_id = mvm->aux_sta.sta_id; - scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS | - IWL_CHANNEL_FLAG_ACCURATE_EBS | - IWL_CHANNEL_FLAG_EBS_ADD | - IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - - band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; - band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; + flags = SCAN_CONFIG_FLAG_ACTIVATE | + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | + SCAN_CONFIG_FLAG_SET_TX_CHAINS | + SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_AUX_STA_ID | + SCAN_CONFIG_FLAG_SET_ALL_TIMES | + SCAN_CONFIG_FLAG_SET_LEGACY_RATES | + SCAN_CONFIG_FLAG_SET_MAC_ADDR | + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | + SCAN_CONFIG_N_CHANNELS(num_channels) | + (type == IWL_SCAN_TYPE_FRAGMENTED ? + SCAN_CONFIG_FLAG_SET_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); + + channel_flags = IWL_CHANNEL_FLAG_EBS | + IWL_CHANNEL_FLAG_ACCURATE_EBS | + IWL_CHANNEL_FLAG_EBS_ADD | + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; + + if (iwl_mvm_is_cdb_supported(mvm)) { + flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? 
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; + iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags); + } else { + iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); + } - cmd.data[0] = scan_config; + cmd.data[0] = cfg; cmd.len[0] = cmd_size; cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; @@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (!ret) mvm->scan_type = type; - kfree(scan_config); + kfree(cfg); return ret; } @@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { + struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if (params->measurement_dwell) { cmd->active_dwell = params->measurement_dwell; cmd->passive_dwell = params->measurement_dwell; cmd->extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->extended_dwell = scan_timing[params->type].dwell_extended; + cmd->active_dwell = timing->dwell_active; + cmd->passive_dwell = timing->dwell_passive; + cmd->extended_dwell = timing->dwell_extended; + } + cmd->fragmented_dwell = timing->dwell_fragmented; + + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time); + cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time); + cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time); + cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + } else { + cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time); + cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time); + cmd->no_cdb.scan_priority = + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); - cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); if (iwl_mvm_is_regular_scan(params)) cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); @@ -1063,9 +1142,8 @@ static void iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 ssid_bitmap, - struct iwl_scan_req_umac *cmd) + struct iwl_scan_channel_cfg_umac *channel_cfg) { - struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data; int i; for (i = 0; i < n_channels; i++) { @@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) { flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; + if (iwl_mvm_is_cdb_supported(mvm)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; + } if (iwl_mvm_rrm_scan_needed(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; @@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + + void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ? 
+ (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data; + struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; int uid, i; u32 ssid_bitmap = 0; + u8 channel_flags = 0; struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); @@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) - cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - cmd->n_channels = params->n_channels; + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->cdb.channel_flags = channel_flags; + cmd->cdb.n_channels = params->n_channels; + } else { + cmd->no_cdb.channel_flags = channel_flags; + cmd->no_cdb.n_channels = params->n_channels; + } iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, - params->n_channels, ssid_bitmap, cmd); + params->n_channels, ssid_bitmap, + cmd_data); for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = @@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { + int base_size = IWL_SCAN_REQ_UMAC_SIZE; + + if (iwl_mvm_is_cdb_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - return sizeof(struct iwl_scan_req_umac) + + return base_size + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_req_umac_tail); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 09e9e2e3ed04..bd1dcc863d8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); - add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); + if (mvm_sta->associated) + add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; @@ -454,14 +455,53 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) rcu_read_unlock(); + return disable_agg_tids; +} + +static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, + bool same_sta) +{ + struct iwl_mvm_sta *mvmsta; + u8 txq_curr_ac, sta_id, tid; + unsigned long disable_agg_tids = 0; + int ret; + + lockdep_assert_held(&mvm->mutex); + spin_lock_bh(&mvm->queue_info_lock); - /* Unmap MAC queues and TIDs from this queue */ - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; + txq_curr_ac = mvm->queue_info[queue].mac80211_ac; + sta_id = mvm->queue_info[queue].ra_sta_id; + tid = mvm->queue_info[queue].txq_tid; spin_unlock_bh(&mvm->queue_info_lock); - return disable_agg_tids; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + + disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); + /* Disable the queue */ + if (disable_agg_tids) + 
iwl_mvm_invalidate_sta_queue(mvm, queue, + disable_agg_tids, false); + + ret = iwl_mvm_disable_txq(mvm, queue, + mvmsta->vif->hw_queue[txq_curr_ac], + tid, 0); + if (ret) { + /* Re-mark the inactive queue as inactive */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, + "Failed to free inactive queue %d (ret=%d)\n", + queue, ret); + + return ret; + } + + /* If TXQ is allocated to another STA, update removal in FW */ + if (!same_sta) + iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); + + return 0; } static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, @@ -652,7 +692,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; - bool using_inactive_queue = false; + bool using_inactive_queue = false, same_sta = false; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false; @@ -709,6 +749,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; using_inactive_queue = true; + same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", queue, mvmsta->sta_id, tid); @@ -755,44 +796,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * first */ if (using_inactive_queue) { - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_DISABLE_QUEUE, - }; - u8 txq_curr_ac; - - disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); - - spin_lock_bh(&mvm->queue_info_lock); - txq_curr_ac = mvm->queue_info[queue].mac80211_ac; - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac]; - cmd.tid = mvm->queue_info[queue].txq_tid; - spin_unlock_bh(&mvm->queue_info_lock); - - /* Disable the queue */ - if (disable_agg_tids) - iwl_mvm_invalidate_sta_queue(mvm, queue, - disable_agg_tids, false); - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), - &cmd); - if (ret) { - IWL_ERR(mvm, - "Failed to free inactive queue %d (ret=%d)\n", - queue, ret); - - /* Re-mark the inactive queue as inactive */ - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; - spin_unlock_bh(&mvm->queue_info_lock); - + ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); + if (ret) return ret; - } - - /* If TXQ is allocated to another STA, update removal in FW */ - if (cmd.sta_id != mvmsta->sta_id) - iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); } IWL_DEBUG_TX_QUEUES(mvm, @@ -868,7 +874,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; - s8 sta_id; int tid; unsigned long tid_bitmap; int ret; @@ -876,7 +881,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->queue_info_lock); - sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; spin_unlock_bh(&mvm->queue_info_lock); @@ -1110,6 +1114,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; + bool using_inactive_queue = false, same_sta = false; /* * Check for inactive queues, 
so we don't reach a situation where we @@ -1133,6 +1138,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; + } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { + /* + * If this queue is already allocated but inactive we'll need to + * first free this queue before enabling it again, we'll mark + * it as reserved to make sure no new traffic arrives on it + */ + using_inactive_queue = true; + same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; @@ -1140,6 +1153,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, mvmsta->reserved_queue = queue; + if (using_inactive_queue) + iwl_mvm_free_inactive_queue(mvm, queue, same_sta); + IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); @@ -1486,6 +1502,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + u8 sta_id = mvm_sta->sta_id; int ret; lockdep_assert_held(&mvm->mutex); @@ -1494,7 +1511,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, kfree(mvm_sta->dup_data); if ((vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == mvm_sta->sta_id) || + mvmvif->ap_sta_id == sta_id) || iwl_mvm_is_dqa_supported(mvm)){ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) @@ -1510,8 +1527,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) + if (iwl_mvm_is_dqa_supported(mvm)) { iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + /* + * If pending_frames is set at this point - it must be + * driver internal logic error, since queues are empty + * and removed successuly. + * warn on it but set it to 0 anyway to avoid station + * not being removed later in the function + */ + WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0)); + } /* If there is a TXQ still marked as reserved - free it */ if (iwl_mvm_is_dqa_supported(mvm) && @@ -1529,7 +1555,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", - mvm_sta->sta_id, reserved_txq, *status)) { + sta_id, reserved_txq, *status)) { spin_unlock_bh(&mvm->queue_info_lock); return -EINVAL; } @@ -1539,7 +1565,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, } if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == mvm_sta->sta_id) { + mvmvif->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; @@ -1548,7 +1574,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) + if (mvm->d0i3_ap_sta_id == sta_id) mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; } } @@ -1557,7 +1583,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. */ - if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { + if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; cancel_delayed_work(&mvm->tdls_cs.dwork); } @@ -1567,21 +1593,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * calls the drain worker. 
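
/*
 * Illustrative sketch only: the sta.c hunk above clears a station's
 * pending_frames counter with atomic_xchg() and warns if it was still
 * non-zero, so a leaked count cannot block the later station removal.
 * Below is a userspace C11-atomics analogue of that "read and reset in one
 * step" idiom; the names are hypothetical, not the driver's.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_STA 16

static atomic_int pending_frames[MAX_STA];

/* Called once the station's queues are known to be empty. */
static void clear_pending_or_warn(int sta_id)
{
	/* atomically fetch the old value and store 0 */
	int leaked = atomic_exchange(&pending_frames[sta_id], 0);

	if (leaked)
		fprintf(stderr, "WARN: sta %d still had %d pending frames\n",
			sta_id, leaked);
}

int main(void)
{
	atomic_store(&pending_frames[3], 2);	/* simulate a leaked count */
	clear_pending_or_warn(3);		/* warns, then resets to 0 */
	clear_pending_or_warn(3);		/* silent: already zero */
	return 0;
}
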
*/ spin_lock_bh(&mvm_sta->lock); + /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... */ - if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + if (atomic_read(&mvm->pending_frames[sta_id])) { + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], ERR_PTR(-EBUSY)); spin_unlock_bh(&mvm_sta->lock); /* disable TDLS sta queues on drain complete */ if (sta->tdls) { - mvm->tfd_drained[mvm_sta->sta_id] = - mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", - mvm_sta->sta_id); + mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk; + IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id); } ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); @@ -1765,6 +1790,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1780,19 +1806,16 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_get_wd_timeout(mvm, vif, false, false); int queue; - if ((vif->type == NL80211_IFTYPE_AP) && - (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE))) + if (vif->type == NL80211_IFTYPE_AP) queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) && - (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE))) + else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - else if (WARN(1, "Missed required TXQ for adding bcast STA\n")) + else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) return -EINVAL; iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, wdg_timeout); + bsta->tfd_queue_msk |= BIT(queue); } if (vif->type == NL80211_IFTYPE_ADHOC) @@ -1801,8 +1824,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) return -ENOSPC; - return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, - mvmvif->id, mvmvif->color); + ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, + mvmvif->id, mvmvif->color); + if (ret) + return ret; + + /* + * In AP vif type, we also need to enable the cab_queue. 
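
/*
 * Illustrative sketch only: the hunk above parks ERR_PTR(-EBUSY) in the
 * fw_id_to_mac_id[] slot while a station's frames drain, so lookups see
 * "busy" rather than a half-removed station. The standalone analogue below
 * uses its own sentinel object instead of the kernel's ERR_PTR() and leaves
 * out the RCU machinery entirely.
 */
#include <stdio.h>

struct station { int id; };

static struct station sta_busy_sentinel;	/* stands in for ERR_PTR(-EBUSY) */
#define STA_BUSY (&sta_busy_sentinel)

static struct station *sta_table[16];

static struct station *lookup_sta(int sta_id)
{
	struct station *sta = sta_table[sta_id];

	if (!sta || sta == STA_BUSY)
		return NULL;	/* absent or mid-removal: caller backs off */
	return sta;
}

int main(void)
{
	struct station s = { .id = 5 };

	sta_table[5] = &s;
	printf("lookup: %p\n", (void *)lookup_sta(5));	/* valid pointer */

	sta_table[5] = STA_BUSY;	/* removal in progress, frames draining */
	printf("lookup: %p\n", (void *)lookup_sta(5));	/* NULL: treated as busy */
	return 0;
}
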
However, we + * have to enable it after the ADD_STA command is sent, otherwise the + * FW will throw an assert once we send the ADD_STA command (it'll + * detect a mismatch in the tfd_queue_msk, as we can't add the + * enabled-cab_queue to the mask) + */ + if (iwl_mvm_is_dqa_supported(mvm) && + vif->type == NL80211_IFTYPE_AP) { + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, + 0, &cfg, wdg_timeout); + } + + return 0; +} + +static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, + IWL_MAX_TID_COUNT, 0); + + if (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) { + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); + mvmvif->bcast_sta.tfd_queue_msk &= + ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + } + + if (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) { + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_P2P_DEVICE_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); + mvmvif->bcast_sta.tfd_queue_msk &= + ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + } } /* Send the FW a request to remove the station from it's internal data @@ -1814,6 +1896,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_free_bcast_sta_queues(mvm, vif); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -1827,22 +1912,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) + if (!iwl_mvm_is_dqa_supported(mvm)) { qmask = iwl_mvm_mac_get_queues_mask(vif); - if (vif->type == NL80211_IFTYPE_AP) { /* * The firmware defines the TFD queue mask to only be relevant * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. + * be included. This only happens in NL80211_IFTYPE_AP vif type, + * so the next line will only have an effect there. 
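
/*
 * Illustrative sketch only: the hunks above keep the broadcast station's
 * queues in a tfd_queue_msk bitmask - set the bit when a queue is enabled,
 * test and clear it on teardown, and mask the CAB (multicast) queue out of
 * the unicast mask entirely. The helpers below are a standalone analogue
 * with made-up queue numbers, not the driver API.
 */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define EX_PROBE_RESP_QUEUE	9	/* hypothetical queue numbers */
#define EX_CAB_QUEUE		12

static unsigned int tfd_queue_msk;

static void enable_queue(int queue)
{
	/* ... hardware programming would go here ... */
	tfd_queue_msk |= BIT(queue);
}

static void disable_queue_if_enabled(int queue)
{
	if (!(tfd_queue_msk & BIT(queue)))
		return;			/* never enabled, nothing to tear down */
	/* ... hardware teardown would go here ... */
	tfd_queue_msk &= ~BIT(queue);
}

int main(void)
{
	enable_queue(EX_PROBE_RESP_QUEUE);
	enable_queue(EX_CAB_QUEUE);

	/* the multicast (CAB) queue is not part of the unicast mask */
	tfd_queue_msk &= ~BIT(EX_CAB_QUEUE);

	disable_queue_if_enabled(EX_PROBE_RESP_QUEUE);
	disable_queue_if_enabled(EX_CAB_QUEUE);	/* no-op: bit already clear */

	printf("final mask: 0x%x\n", tfd_queue_msk);	/* 0x0 */
	return 0;
}
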
*/ qmask &= ~BIT(vif->cab_queue); - - if (iwl_mvm_is_dqa_supported(mvm)) - qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); - } else if (iwl_mvm_is_dqa_supported(mvm) && - vif->type == NL80211_IFTYPE_P2P_DEVICE) { - qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); } return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, @@ -2247,6 +2326,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } + /* + * TXQ shouldn't be in inactive mode for non-DQA, so getting + * an inactive queue from iwl_mvm_find_free_queue() is + * certainly a bug + */ + WARN_ON(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_INACTIVE); /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; @@ -2962,6 +3048,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); + if (!mvm_sta) { + IWL_ERR(mvm, "Failed to find station\n"); + return -EINVAL; + } + sta_id = mvm_sta->sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); @@ -2989,8 +3080,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return 0; } - sta_id = mvm_sta->sta_id; - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index b45c7b9937c8..4be34f902278 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -437,6 +437,7 @@ struct iwl_mvm_sta { bool disable_tx; bool tlc_amsdu; bool sleeping; + bool associated; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 66957ac12ca4..dd2b4a300819 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd) +static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info) { + u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); - u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist); u8 protocol = 0; /* @@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, * compute it */ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) - return; + goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || @@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); - return; + goto out; } if (skb->protocol == htons(ETH_P_IP)) { @@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); - return; + goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); @@ -159,7 +158,7 @@ static void 
iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); - return; + goto out; } /* enable L4 csum */ @@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; - tx_cmd->offload_assist = cpu_to_le16(offload_assist); +out: #endif + return offload_assist; } /* @@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); @@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); - /* Total # bytes to be transmitted */ - tx_cmd->len = cpu_to_le16((u16)skb->len + - (uintptr_t)skb_info->driver_data[0]); + /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ + tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; @@ -295,7 +293,52 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); - iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd); + tx_cmd->offload_assist |= + cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info)); +} + +static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta) +{ + int rate_idx; + u8 rate_plcp; + u32 rate_flags; + + /* HT rate doesn't make sense for a non data frame */ + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, + "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n", + info->control.rates[0].flags, + info->control.rates[0].idx); + + rate_idx = info->control.rates[0].idx; + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index( + &mvm->nvm_data->bands[info->band], sta); + + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == NL80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + + if (info->band == NL80211_BAND_2GHZ && + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; + else + rate_flags = + BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + return (u32)rate_plcp | rate_flags; } /* @@ -305,10 +348,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { - u32 rate_flags; - int rate_idx; - u8 rate_plcp; - /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; @@ -337,46 +376,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } - /* HT rate 
doesn't make sense for a non data frame */ - WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, - "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", - info->control.rates[0].flags, - info->control.rates[0].idx, - le16_to_cpu(fc)); - - rate_idx = info->control.rates[0].idx; - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == NL80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); - mvm->mgmt_last_antenna_idx = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), mvm->mgmt_last_antenna_idx); - if (info->band == NL80211_BAND_2GHZ && - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; - else - rate_flags = - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); + tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, @@ -459,7 +464,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; @@ -479,12 +483,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + return dev_cmd; +} + +static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, + struct iwl_device_cmd *cmd) +{ + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); + memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); - skb_info->driver_data[1] = dev_cmd; - - return dev_cmd; + skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, @@ -496,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, switch (info->control.vif->type) { case NL80211_IFTYPE_AP: /* - * handle legacy hostapd as well, where station may be added - * only after assoc. + * Handle legacy hostapd as well, where station may be added + * only after assoc. Take care of the case where we send a + * deauth to a station that we don't have. */ - if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc)) + if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || + ieee80211_is_deauth(fc)) return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; - WARN_ON_ONCE(1); + WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) @@ -536,9 +548,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * queue. 
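
/*
 * Illustrative sketch only: the hunk above routes AP-mode frames that may be
 * sent before the peer station exists (probe response, auth and now also
 * deauth) to the dedicated probe-response queue. The switch below is a
 * standalone analogue with hypothetical frame types and queue numbers rather
 * than the mac80211 helpers.
 */
#include <stdio.h>

enum frame_type { FRAME_PROBE_RESP, FRAME_AUTH, FRAME_DEAUTH, FRAME_DATA };

#define EX_AP_PROBE_RESP_QUEUE	9	/* hypothetical queue ids */
#define EX_DATA_QUEUE		3

static int ap_select_queue(enum frame_type ft)
{
	switch (ft) {
	case FRAME_PROBE_RESP:
	case FRAME_AUTH:
	case FRAME_DEAUTH:
		/* no station entry may exist yet for these */
		return EX_AP_PROBE_RESP_QUEUE;
	default:
		return EX_DATA_QUEUE;
	}
}

int main(void)
{
	printf("deauth -> queue %d\n", ap_select_queue(FRAME_DEAUTH));	/* 9 */
	printf("data   -> queue %d\n", ap_select_queue(FRAME_DATA));	/* 3 */
	return 0;
}
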
STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue */ - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && skb_info->control.vif->type == NL80211_IFTYPE_STATION) - IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + skb_info->hw_queue = mvm->aux_queue; memcpy(&info, skb->cb, sizeof(info)); @@ -550,9 +562,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info.hw_queue != info.control.vif->cab_queue))) return -1; - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)0; - queue = info.hw_queue; /* @@ -563,9 +572,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * (this is not possible for unicast packets as a TLDS discovery * response are sent without a station entry); otherwise use the * AUX station. - * In DQA mode, if vif is of type STATION and frames are not multicast, - * they should be sent from the BSS queue. For example, TDLS setup - * frames should be sent on this queue, as they go through the AP. + * In DQA mode, if vif is of type STATION and frames are not multicast + * or offchannel, they should be sent from the BSS queue. + * For example, TDLS setup frames should be sent on this queue, + * as they go through the AP. */ sta_id = mvm->aux_sta.sta_id; if (info.control.vif) { @@ -587,7 +597,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_STATION_COUNT) sta_id = ap_sta_id; } else if (iwl_mvm_is_dqa_supported(mvm) && - info.control.vif->type == NL80211_IFTYPE_STATION) { + info.control.vif->type == NL80211_IFTYPE_STATION && + queue != mvm->aux_queue) { queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; } } @@ -598,6 +609,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (!dev_cmd) return -1; + /* From now on, we cannot access info->control */ + iwl_mvm_skb_prepare_status(skb, dev_cmd); + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* Copy MAC header from skb into command buffer */ @@ -634,7 +648,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; - u16 amsdu_add, snap_ip_tcp, pad, i = 0; + u16 snap_ip_tcp, pad, i = 0; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 *qc, tid, txf; @@ -736,21 +750,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); - - /* - * Compute the length of all the data added for the A-MSDU. - * This will be used to compute the length to write in the TX - * command. We have: SNAP + IP + TCP for n -1 subframes and - * ETH header for n subframes. Note that the original skb - * already had one set of SNAP / IP / TCP headers. 
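
/*
 * Illustrative sketch only: the removed tx.c code above computed the extra
 * bytes an A-MSDU adds - an Ethernet header per subframe plus SNAP/IP/TCP
 * (and padding) for every subframe after the first, with the subframe count
 * derived from the TCP payload length and the MSS. The worked example below
 * redoes that arithmetic standalone; the header sizes are illustrative
 * constants, not taken from the driver.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tcp_payload_len = 11000;	/* bytes carried by this skb */
	unsigned int mss = 2800;		/* payload per subframe */
	unsigned int eth_hdr = 14;		/* Ethernet header per subframe */
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP, no options */
	unsigned int pad = 2;			/* subframe alignment padding */

	unsigned int num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
	unsigned int amsdu_add = num_subframes * eth_hdr +
				 (num_subframes - 1) * (snap_ip_tcp + pad);

	/* 4 subframes -> 4*14 + 3*50 = 206 extra bytes on the air */
	printf("subframes=%u, added header bytes=%u\n",
	       num_subframes, amsdu_add);
	return 0;
}
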
- */ - num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); - amsdu_add = num_subframes * sizeof(struct ethhdr) + - (num_subframes - 1) * (snap_ip_tcp + pad); - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add; - __skb_queue_tail(mpdus_skb, skb); return 0; } @@ -789,14 +788,6 @@ segment: ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { - struct ieee80211_tx_info *skb_info = - IEEE80211_SKB_CB(tmp); - - num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); - amsdu_add = num_subframes * sizeof(struct ethhdr) + - (num_subframes - 1) * (snap_ip_tcp + pad); - skb_info->driver_data[0] = - (void *)(uintptr_t)amsdu_add; skb_shinfo(tmp)->gso_size = mss; } else { qc = ieee80211_get_qos_ctl((void *)tmp->data); @@ -908,7 +899,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, goto drop; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - /* From now on, we cannot access info->control */ /* * we handle that entirely ourselves -- for uAPSD the firmware @@ -919,6 +909,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_lock(&mvmsta->lock); + /* nullfunc frames should go to the MGMT queue regardless of QOS, + * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default + * assignment of MGMT TID + */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { u8 *qc = NULL; qc = ieee80211_get_qos_ctl(hdr); @@ -931,27 +925,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; - } else if (iwl_mvm_is_dqa_supported(mvm) && - (ieee80211_is_qos_nullfunc(fc) || - ieee80211_is_nullfunc(fc))) { - /* - * nullfunc frames should go to the MGMT queue regardless of QOS - */ - tid = IWL_MAX_TID_COUNT; + if (WARN_ON_ONCE(is_ampdu && + mvmsta->tid_data[tid].state != IWL_AGG_ON)) + goto drop_unlock_sta; } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) txq_id = mvmsta->tid_data[tid].txq_id; - - if (ieee80211_is_mgmt(fc)) - tx_cmd->tid_tspec = IWL_TID_NON_QOS; - } - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { /* default to TID 0 for non-QoS packets */ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 
0 : tid; @@ -959,11 +939,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; } - if (is_ampdu) { - if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) - goto drop_unlock_sta; - txq_id = mvmsta->tid_data[tid].txq_id; - } + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || @@ -1015,6 +994,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); + /* From now on, we cannot access info->control */ + iwl_mvm_skb_prepare_status(skb, dev_cmd); + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; @@ -1024,7 +1006,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); /* Increase pending frames count if this isn't AMPDU */ - if (!is_ampdu) + if ((iwl_mvm_is_dqa_supported(mvm) && + mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && + mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || + (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -1040,7 +1025,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; @@ -1054,9 +1038,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, memcpy(&info, skb->cb, sizeof(info)); - /* This holds the amsdu headers length */ - skb_info->driver_data[0] = (void *)(uintptr_t)0; - if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); @@ -1295,8 +1276,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, memset(&info->status, 0, sizeof(info->status)); - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: @@ -1319,10 +1298,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ - if (txq_id >= mvm->first_agg_queue && + if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong when the status isn't success */ if (status != TX_STATUS_SUCCESS) { @@ -1357,7 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ieee80211_tx_status(mvm->hw, skb); } - if (txq_id >= mvm->first_agg_queue) { + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) { /* If this is an aggregation queue, we use the ssn since: * ssn = wifi seq_num % 256. 
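
/*
 * Illustrative sketch only: the comment above relies on the scheduler's ssn
 * being the 802.11 sequence number taken modulo 256. The helpers below
 * extract the 12-bit sequence number from the frame's sequence-control field
 * (the fragment number sits in the low 4 bits) and reduce it to the 8-bit
 * scheduler index; the macro names are local stand-ins, not kernel ones.
 */
#include <stdio.h>

#define EX_SCTL_SEQ	0xFFF0u		/* sequence number, bits 4..15 */
#define EX_SEQ_TO_SN(s)	(((s) & EX_SCTL_SEQ) >> 4)

int main(void)
{
	unsigned int seq_ctrl = (0x7b5u << 4) | 0x0;	/* SN 0x7b5, fragment 0 */
	unsigned int sn = EX_SEQ_TO_SN(seq_ctrl);	/* 0x7b5 = 1973 */
	unsigned int scd_ssn = sn % 256;		/* 1973 % 256 = 181 */

	printf("sn=%u, scheduler ssn=%u\n", sn, scd_ssn);
	return 0;
}
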
* The seq_ctl is the sequence control of the packet to which diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d04babd99b53..dedea96a8e0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; - u32 base; - base = mvm->error_event_table; if (mvm->cur_ucode == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; @@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); +} + +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +{ + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); + + if (mvm->error_event_table[1]) + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]); if (mvm->support_umac_log) iwl_mvm_dump_umac_error_log(mvm); @@ -649,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n", - cfg->tid); + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", + queue, cfg->tid); return; } @@ -693,10 +699,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .tid = cfg->tid, }; - /* Set sta_id in the command, if it exists */ - if (iwl_mvm_is_dqa_supported(mvm)) - cmd.sta_id = cfg->sta_id; - iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), @@ -706,8 +708,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } } -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, @@ -720,7 +722,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { spin_unlock_bh(&mvm->queue_info_lock); - return; + return 0; } mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); @@ -760,7 +762,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) { spin_unlock_bh(&mvm->queue_info_lock); - return; + return 0; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; @@ -791,6 +793,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); + + return ret; } /** |
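
/*
 * Illustrative sketch only: the utils.c hunk above turns the error-log dump
 * into a helper that takes the table base address, so it can be run once per
 * LMAC - unconditionally for the first table and only when a second table
 * address was reported. The standalone analogue below mimics that shape with
 * fake table addresses and a stub dump routine; nothing here is driver API.
 */
#include <stdio.h>

#define MAX_LMACS 2

static void dump_lmac_error_log(int lmac, unsigned int base)
{
	/* real code would read the error table at @base from the device */
	printf("LMAC%d error table at 0x%08x\n", lmac, base);
}

static void dump_nic_error_log(const unsigned int error_event_table[MAX_LMACS])
{
	dump_lmac_error_log(0, error_event_table[0]);

	/* the second LMAC only exists on some devices */
	if (error_event_table[1])
		dump_lmac_error_log(1, error_event_table[1]);
}

int main(void)
{
	unsigned int single_lmac[MAX_LMACS] = { 0x00800000, 0 };
	unsigned int dual_lmac[MAX_LMACS]   = { 0x00800000, 0x00c00000 };

	dump_nic_error_log(single_lmac);	/* dumps one table */
	dump_nic_error_log(dual_lmac);		/* dumps both tables */
	return 0;
}
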