198 files changed, 3786 insertions, 2199 deletions
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a785554916c0..304736870dca 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -604,15 +604,6 @@ emit_cond_jmp: const struct bpf_insn insn1 = insn[1]; u64 imm64; - if (insn1.code != 0 || insn1.src_reg != 0 || - insn1.dst_reg != 0 || insn1.off != 0) { - /* Note: verifier in BPF core must catch invalid - * instructions. - */ - pr_err_once("Invalid BPF_LD_IMM64 instruction\n"); - return -EINVAL; - } - imm64 = (u64)insn1.imm << 32 | (u32)imm; emit_a64_mov_i64(dst, imm64, ctx); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 32322ce9b405..14f840df1d95 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -490,13 +490,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; case BPF_LD | BPF_IMM | BPF_DW: - if (insn[1].code != 0 || insn[1].src_reg != 0 || - insn[1].dst_reg != 0 || insn[1].off != 0) { - /* verifier must catch invalid insns */ - pr_err("invalid BPF_LD_IMM64 insn\n"); - return -EINVAL; - } - /* optimization: if imm64 is zero, use 'xor <dst>,<dst>' * to save 7 bytes. */ diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 5cbd1e7a926a..91f7492623d3 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev) } pdata = dev_get_platdata(&bp->pdev->dev); - if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, - "phy int"); - if (!ret) { - phy_irq = gpio_to_irq(pdata->phy_irq_pin); - phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; + if (pdata) { + if (gpio_is_valid(pdata->phy_irq_pin)) { + ret = devm_gpio_request(&bp->pdev->dev, + pdata->phy_irq_pin, "phy int"); + if (!ret) { + phy_irq = gpio_to_irq(pdata->phy_irq_pin); + phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; + } + } else { + phydev->irq = PHY_POLL; } - } else { - phydev->irq = PHY_POLL; } /* attach the mac to the phy */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index a29b12e80855..c7c994eb410e 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -135,7 +135,8 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, - board_pch_spt + board_pch_spt, + board_pch_cnp }; struct e1000_ps_page { @@ -378,18 +379,22 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) * bits to count nanoseconds leaving the rest for fractional nonseconds. 
*/ -#define INCVALUE_96MHz 125 -#define INCVALUE_SHIFT_96MHz 17 -#define INCPERIOD_SHIFT_96MHz 2 -#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) +#define INCVALUE_96MHZ 125 +#define INCVALUE_SHIFT_96MHZ 17 +#define INCPERIOD_SHIFT_96MHZ 2 +#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ) -#define INCVALUE_25MHz 40 -#define INCVALUE_SHIFT_25MHz 18 -#define INCPERIOD_25MHz 1 +#define INCVALUE_25MHZ 40 +#define INCVALUE_SHIFT_25MHZ 18 +#define INCPERIOD_25MHZ 1 -#define INCVALUE_24MHz 125 -#define INCVALUE_SHIFT_24MHz 14 -#define INCPERIOD_24MHz 3 +#define INCVALUE_24MHZ 125 +#define INCVALUE_SHIFT_24MHZ 14 +#define INCPERIOD_24MHZ 3 + +#define INCVALUE_38400KHZ 26 +#define INCVALUE_SHIFT_38400KHZ 19 +#define INCPERIOD_38400KHZ 1 /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with @@ -515,6 +520,7 @@ extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_pch_spt_info; +extern const struct e1000_info e1000_pch_cnp_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index e70b1ebff60d..e23dbd9190d6 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -911,19 +911,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + /* fall through */ + case e1000_pch_cnp: mask |= BIT(18); break; default: break; } - if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) + if (mac->type >= e1000_pch_lpt) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - if ((mac->type == e1000_pch_lpt) || - (mac->type == e1000_pch_spt)) { + if (mac->type >= e1000_pch_lpt) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; @@ -1532,7 +1533,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rctl, fext_nvm11, tarc0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { fext_nvm11 = er32(FEXTNVM11); fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -1576,6 +1577,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch_spt: + case e1000_pch_cnp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 4e733bf1a38e..66bd5060a65b 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -96,6 +96,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8 #define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3 #define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6 +#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD +#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE +#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB +#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC #define E1000_REVISION_4 4 @@ -118,6 +122,7 @@ enum e1000_mac_type { e1000_pch2lan, e1000_pch_lpt, e1000_pch_spt, + e1000_pch_cnp, }; enum e1000_media_type { diff --git 
a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index f3aaca743ea3..68ea8b4555ab 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -237,7 +237,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { /* Only unforce SMBus if ME is not active */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Unforce SMBus mode in PHY */ @@ -333,6 +333,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -474,6 +475,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -607,7 +609,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) nvm->type = e1000_nvm_flash_sw; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { /* in SPT, gfpreg doesn't exist. NVM size is taken from the * STRAP register. This is because in SPT the GbE Flash region * is no longer accessed through the flash registers. Instead, @@ -715,6 +717,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) /* fall-through */ case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -732,7 +735,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { + if (mac->type >= e1000_pch_lpt) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -1399,9 +1402,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. */ - if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) && link) { + if ((hw->mac.type >= e1000_pch2lan) && link) { u16 speed, duplex; e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); @@ -1412,7 +1413,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ emi_val = 0; - } else if (hw->mac.type == e1000_pch_spt && + } else if (hw->mac.type >= e1000_pch_spt && duplex == FULL_DUPLEX && speed != SPEED_1000) { tipg_reg |= 0xC; emi_val = 1; @@ -1435,8 +1436,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) emi_addr = I217_RX_CONFIG; ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - if (hw->mac.type == e1000_pch_lpt || - hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_lpt) { u16 phy_reg; e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg); @@ -1452,7 +1452,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { u16 data; u16 ptr_gap; @@ -1502,7 +1502,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * on power up. 
* Set the Beacon Duration for I217 to 8 usec */ - if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { u32 mac_reg; mac_reg = er32(FEXTNVM4); @@ -1520,8 +1520,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; } - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1533,15 +1532,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; - /* FEXTNVM6 K1-off workaround */ - if (hw->mac.type == e1000_pch_spt) { - u32 pcieanacfg = er32(PCIEANACFG); + if (hw->mac.type >= e1000_pch_lpt) { u32 fextnvm6 = er32(FEXTNVM6); - if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) - fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; - else - fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + if (hw->mac.type == e1000_pch_spt) { + /* FEXTNVM6 K1-off workaround - for SPT only */ + u32 pcieanacfg = er32(PCIEANACFG); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + } ew32(FEXTNVM6, fextnvm6); } @@ -1640,6 +1642,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2091,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3125,6 +3129,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) switch (hw->mac.type) { case e1000_pch_spt: + case e1000_pch_cnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -3380,7 +3385,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); @@ -3399,7 +3404,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); @@ -3423,7 +3428,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. 
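The == e1000_pch_lpt / == e1000_pch_spt tests in the ich8lan.c hunks around here are being collapsed into >= comparisons; that is safe only because enum e1000_mac_type (see the hw.h hunk above) declares the PCH generations in order, so the newly appended e1000_pch_cnp is covered by every existing range check without touching each call site. A minimal sketch of the idea, reusing the enum from the diff but with a made-up helper name:

static bool ex_is_pch_lpt_or_newer(enum e1000_mac_type type)
{
        /* lpt < spt < cnp in declaration order, so one comparison
         * also covers generations appended later
         */
        return type >= e1000_pch_lpt;
}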
*/ hsfsts.hsf_status.flcdone = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else @@ -3450,13 +3455,13 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -3527,7 +3532,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, only 32 bits access is supported, * so this function should not be called. */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) return -E1000_ERR_NVM; else ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); @@ -3634,8 +3639,7 @@ static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val = -E1000_ERR_NVM; u8 count = 0; - if (offset > ICH_FLASH_LINEAR_ADDR_MASK || - hw->mac.type != e1000_pch_spt) + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -4068,6 +4072,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -4153,7 +4158,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } else { @@ -4173,7 +4178,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); @@ -4185,7 +4190,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * not flash. Therefore, only 32 bit access is * supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -4243,7 +4248,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { if (offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } @@ -4259,7 +4264,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else @@ -4272,7 +4277,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, * not flash. 
Therefore, only 32 bit access is * supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -4464,14 +4469,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else @@ -4894,8 +4899,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -5299,7 +5303,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || (device_id == E1000_DEV_ID_PCH_I218_V3) || - (hw->mac.type == e1000_pch_spt)) { + (hw->mac.type >= e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5865,7 +5869,8 @@ const struct e1000_info e1000_pch2_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS - | FLAG2_HAS_EEE, + | FLAG2_HAS_EEE + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, @@ -5914,3 +5919,23 @@ const struct e1000_info e1000_pch_spt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; + +const struct e1000_info e1000_pch_cnp_info = { + .mac = e1000_pch_cnp, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 667fc45ce906..b3679728caac 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -71,6 +71,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, [board_pch_spt] = &e1000_pch_spt_info, + [board_pch_cnp] = &e1000_pch_cnp_info, }; struct e1000_reg_info { @@ -1791,8 +1792,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt))) { + if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1872,8 +1872,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt))) { + if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); 
adapter->corr_errors += @@ -2241,8 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); - } else if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + } else if (hw->mac.type >= e1000_pch_lpt) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3000,8 +2998,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT Si errata workaround to avoid data corruption */ - if (hw->mac.type == e1000_pch_spt) { + /* SPT and CNP Si errata workaround to avoid data corruption */ + if (hw->mac.type >= e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3497,8 +3495,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) /* Make sure clock is enabled on I217/I218/I219 before checking * the frequency */ - if (((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) && + if ((hw->mac.type >= e1000_pch_lpt) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3512,42 +3509,57 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: /* Stable 96MHz frequency */ - incperiod = INCPERIOD_96MHz; - incvalue = INCVALUE_96MHz; - shift = INCVALUE_SHIFT_96MHz; - adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + incperiod = INCPERIOD_96MHZ; + incvalue = INCVALUE_96MHZ; + shift = INCVALUE_SHIFT_96MHZ; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; break; case e1000_pch_lpt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ - incperiod = INCPERIOD_96MHz; - incvalue = INCVALUE_96MHz; - shift = INCVALUE_SHIFT_96MHz; - adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + incperiod = INCPERIOD_96MHZ; + incvalue = INCVALUE_96MHZ; + shift = INCVALUE_SHIFT_96MHZ; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; } else { /* Stable 25MHz frequency */ - incperiod = INCPERIOD_25MHz; - incvalue = INCVALUE_25MHz; - shift = INCVALUE_SHIFT_25MHz; + incperiod = INCPERIOD_25MHZ; + incvalue = INCVALUE_25MHZ; + shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; } break; case e1000_pch_spt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHz; - incvalue = INCVALUE_24MHz; - shift = INCVALUE_SHIFT_24MHz; + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; adapter->cc.shift = shift; break; } return -EINVAL; + case e1000_pch_cnp: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + } else { + /* Stable 38400KHz frequency */ + incperiod = INCPERIOD_38400KHZ; + incvalue = INCVALUE_38400KHZ; + shift = INCVALUE_SHIFT_38400KHZ; + adapter->cc.shift = shift; + } + break; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ - incperiod = INCPERIOD_25MHz; - incvalue = INCVALUE_25MHz; - shift = INCVALUE_SHIFT_25MHz; + incperiod = INCPERIOD_25MHZ; + incvalue = INCVALUE_25MHZ; + shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; break; default: @@ -4038,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case 
e1000_pch_cnp: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= ETH_DATA_LEN) { @@ -4082,7 +4095,7 @@ void e1000e_reset(struct e1000_adapter *adapter) } } - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -4157,7 +4170,7 @@ void e1000e_reset(struct e1000_adapter *adapter) phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } - if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { u32 reg; /* Fextnvm7 @ 0xe4[2] = 1 */ @@ -4291,7 +4304,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) if (!pci_channel_offline(adapter->pdev)) { if (reset) e1000e_reset(adapter); - else if (hw->mac.type == e1000_pch_spt) + else if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); } e1000_clean_tx_ring(adapter->tx_ring); @@ -4979,8 +4992,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6354,8 +6366,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + } else if (hw->mac.type >= e1000_pch_lpt) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. @@ -7514,6 +7525,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 34cc3be0df8e..b366885487a8 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -301,8 +301,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: - if (((hw->mac.type != e1000_pch_lpt) && - (hw->mac.type != e1000_pch_spt)) || + case e1000_pch_cnp: + if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 70f9458f7a01..cdde3cc28fb5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -125,7 +125,6 @@ enum i40e_state_t { __I40E_CONFIG_BUSY, __I40E_CONFIG_DONE, __I40E_DOWN, - __I40E_NEEDS_RESTART, __I40E_SERVICE_SCHED, __I40E_ADMINQ_EVENT_PENDING, __I40E_MDD_EVENT_PENDING, @@ -138,7 +137,6 @@ enum i40e_state_t { __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, - 
__I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, __I40E_BAD_EEPROM, @@ -147,6 +145,20 @@ enum i40e_state_t { __I40E_RESET_FAILED, __I40E_PORT_SUSPENDED, __I40E_VF_DISABLE, + /* This must be last as it determines the size of the BITMAP */ + __I40E_STATE_SIZE__, +}; + +/* VSI state flags */ +enum i40e_vsi_state_t { + __I40E_VSI_DOWN, + __I40E_VSI_NEEDS_RESTART, + __I40E_VSI_SYNCING_FILTERS, + __I40E_VSI_OVERFLOW_PROMISC, + __I40E_VSI_REINIT_REQUESTED, + __I40E_VSI_DOWN_REQUESTED, + /* This must be last as it determines the size of the BITMAP */ + __I40E_VSI_STATE_SIZE__, }; enum i40e_interrupt_policy { @@ -245,7 +257,7 @@ struct i40e_tc_configuration { struct i40e_udp_port_config { /* AdminQ command interface expects port number in Host byte order */ - u16 index; + u16 port; u8 type; }; @@ -322,7 +334,7 @@ struct i40e_flex_pit { struct i40e_pf { struct pci_dev *pdev; struct i40e_hw hw; - unsigned long state; + DECLARE_BITMAP(state, __I40E_STATE_SIZE__); struct msix_entry *msix_entries; bool fc_autoneg_status; @@ -396,6 +408,8 @@ struct i40e_pf { #define I40E_FLAG_DCB_ENABLED BIT_ULL(20) #define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) +#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23) +#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24) #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) @@ -428,13 +442,6 @@ struct i40e_pf { #define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57) #define I40E_FLAG_LEGACY_RX BIT_ULL(58) - /* Tracks features that are disabled due to hw limitations. - * If a bit is set here, it means that the corresponding - * bit in the 'flags' field is cleared i.e that feature - * is disabled - */ - u64 hw_disabled_flags; - struct i40e_client_instance *cinst; bool stat_offsets_loaded; struct i40e_hw_port_stats stats; @@ -593,7 +600,7 @@ struct i40e_vsi { bool stat_offsets_loaded; u32 current_netdev_flags; - unsigned long state; + DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); #define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) #define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index eb2896fd52a6..c3b81a97558e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -371,8 +371,8 @@ void i40e_client_subtask(struct i40e_pf *pf) cdev = pf->cinst; /* If we're down or resetting, just bail */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_DOWN, pf->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; if (!client || !cdev) @@ -382,7 +382,7 @@ void i40e_client_subtask(struct i40e_pf *pf) * the netdev is up, then open the client. */ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { - if (!test_bit(__I40E_DOWN, &vsi->state) && + if (!test_bit(__I40E_VSI_DOWN, vsi->state) && client->ops && client->ops->open) { set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); ret = client->ops->open(&cdev->lan_info, client); @@ -397,7 +397,7 @@ void i40e_client_subtask(struct i40e_pf *pf) /* Likewise for client close. If the client is up, but the netdev * is down, then close the client. 
*/ - if (test_bit(__I40E_DOWN, &vsi->state) && + if (test_bit(__I40E_VSI_DOWN, vsi->state) && client->ops && client->ops->close) { clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); client->ops->close(&cdev->lan_info, client, false); @@ -503,7 +503,7 @@ static void i40e_client_release(struct i40e_client *client) continue; while (test_and_set_bit(__I40E_SERVICE_SCHED, - &pf->state)) + pf->state)) usleep_range(500, 1000); if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { @@ -521,7 +521,7 @@ static void i40e_client_release(struct i40e_client *client) i40e_client_del_instance(pf); dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", client->name); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); + clear_bit(__I40E_SERVICE_SCHED, pf->state); } mutex_unlock(&i40e_device_mutex); } @@ -661,10 +661,10 @@ static void i40e_client_request_reset(struct i40e_info *ldev, switch (reset_level) { case I40E_CLIENT_RESET_LEVEL_PF: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case I40E_CLIENT_RESET_LEVEL_CORE: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; default: dev_warn(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index a3d7ec62b76c..8f326f87a815 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, - " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", - vsi->state, vsi->flags, - vsi->netdev_registered, vsi->current_netdev_flags); + " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); + for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++) + dev_info(&pf->pdev->dev, + " state[%d] = %08lx\n", + i, vsi->state[i]); if (vsi == pf->vsi[pf->lan_vsi]) dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, @@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) } dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n", vsi->active_filters, vsi->promisc_threshold, - (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ? + (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ? 
"ON" : "OFF")); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, @@ -1706,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, } else if (!vsi->netdev) { dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", vsi_seid); - } else if (test_bit(__I40E_DOWN, &vsi->state)) { + } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) { dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", vsi_seid); } else if (rtnl_trylock()) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 08035c4389cd..7a8eb486b9ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -757,7 +757,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, if (memcmp(©_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings))) return -EOPNOTSUPP; - while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) { + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -891,7 +891,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, } done: - clear_bit(__I40E_CONFIG_BUSY, &pf->state); + clear_bit(__I40E_CONFIG_BUSY, pf->state); return err; } @@ -987,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, } /* If we have link and don't have autoneg */ - if (!test_bit(__I40E_DOWN, &pf->state) && + if (!test_bit(__I40E_DOWN, pf->state) && !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); @@ -1039,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } - if (!test_bit(__I40E_DOWN, &pf->state)) { + if (!test_bit(__I40E_DOWN, pf->state)) { /* Give it a little more time to try to come back */ msleep(75); - if (!test_bit(__I40E_DOWN, &pf->state)) + if (!test_bit(__I40E_DOWN, pf->state)) return i40e_nway_reset(netdev); } @@ -1139,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev, /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; - else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); @@ -1246,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; - else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); @@ -1332,7 +1332,7 @@ static int i40e_set_ringparam(struct net_device *netdev, (new_rx_count == vsi->rx_rings[0]->count)) return 0; - while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) { + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -1485,7 +1485,7 @@ free_tx: } done: - clear_bit(__I40E_CONFIG_BUSY, &pf->state); + clear_bit(__I40E_CONFIG_BUSY, pf->state); return err; } @@ -1826,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf 
*pf) int i; for (i = 0; i < pf->num_alloc_vfs; i++) - if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states)) + if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states)) return true; return false; } @@ -1847,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev, /* Offline tests */ netif_info(pf, drv, netdev, "offline testing starting\n"); - set_bit(__I40E_TESTING, &pf->state); + set_bit(__I40E_TESTING, pf->state); if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, @@ -1857,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 1; data[I40E_ETH_TEST_LINK] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, &pf->state); + clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -1886,7 +1886,7 @@ static void i40e_diag_test(struct net_device *netdev, if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, &pf->state); + clear_bit(__I40E_TESTING, pf->state); i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); if (if_running) @@ -2924,11 +2924,11 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; int ret = 0; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); @@ -3643,14 +3643,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return -EOPNOTSUPP; - if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED) + if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) return -ENOSPC; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; @@ -4086,12 +4086,12 @@ flags_complete: /* Flush current ATR settings if ATR was disabled */ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { - pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } /* Only allow ATR evict on hardware that is capable of handling it */ - if (pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c001562f19b2..d5c9c9e06ff5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -47,7 +47,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MINOR 1 -#define DRV_VERSION_BUILD 7 +#define DRV_VERSION_BUILD 14 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -295,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) **/ void i40e_service_event_schedule(struct i40e_pf *pf) { - if (!test_bit(__I40E_DOWN, &pf->state) && - !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + if (!test_bit(__I40E_VSI_DOWN, pf->state) && + !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) queue_work(i40e_wq, &pf->service_task); } @@ -377,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev) switch (pf->tx_timeout_recovery_level) { case 1: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case 2: - set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); break; case 3: - set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); @@ -422,7 +422,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; - if (test_bit(__I40E_DOWN, &vsi->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (!vsi->tx_rings) @@ -753,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u64 tx_p, tx_b; u16 q; - if (test_bit(__I40E_DOWN, &vsi->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; ns = i40e_get_vsi_stats_struct(vsi); @@ -1050,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && - !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) + !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && - !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) + !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) nsd->fd_atr_status = true; else nsd->fd_atr_status = false; @@ -1346,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * to failed, so we don't bother to try sending the filter * to the hardware. 
*/ - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state)) f->state = I40E_FILTER_FAILED; else f->state = I40E_FILTER_NEW; @@ -1525,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) return 0; } - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) @@ -1920,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, if (fcnt != num_add) { *promisc_changed = true; - set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, promiscuous mode forced on\n", i40e_aq_str(hw, aq_err), @@ -2003,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_remove_macvlan_element_data *del_list; - while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) + while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) usleep_range(1000, 2000); pf = vsi->back; @@ -2139,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) num_add = 0; hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)) { + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, + vsi->state)) { new->state = I40E_FILTER_FAILED; continue; } @@ -2227,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) * safely exit if we didn't just enter, we no longer have any failed * filters, and we have reduced filters below the threshold value. 
*/ - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) && !promisc_changed && !failed_filters && (vsi->active_filters < vsi->promisc_threshold)) { dev_info(&pf->pdev->dev, "filter logjam cleared on %s, leaving overflow promiscuous mode\n", vsi_name); - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); promisc_changed = true; vsi->promisc_threshold = 0; } /* if the VF is not trusted do not do promisc */ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } @@ -2265,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if ((changed_flags & IFF_PROMISC) || (promisc_changed && - test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { + test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || - test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)); + test_bit(__I40E_VSI_OVERFLOW_PROMISC, + vsi->state)); if ((vsi->type == I40E_VSI_MAIN) && (pf->lan_veb != I40E_NO_VEB) && !(pf->flags & I40E_FLAG_MFP_ENABLED)) { @@ -2353,7 +2353,7 @@ out: if (retval) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return retval; err_no_memory: @@ -2365,7 +2365,7 @@ err_no_memory_locked: spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return -ENOMEM; } @@ -3611,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data) * this is not a performance path and napi_schedule() * can deal with rescheduling. 
*/ - if (!test_bit(__I40E_DOWN, &pf->state)) + if (!test_bit(__I40E_VSI_DOWN, pf->state)) napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; - set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; - set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + set_bit(__I40E_MDD_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; - set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) - set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) @@ -3644,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); } } @@ -3677,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; @@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data) enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); - if (!test_bit(__I40E_DOWN, &pf->state)) { + if (!test_bit(__I40E_VSI_DOWN, pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf, false); } @@ -3907,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &vsi->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { @@ -4144,7 +4144,7 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { /* When port TX is suspended, don't wait */ - if (test_bit(__I40E_PORT_SUSPENDED, &vsi->back->state)) + if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); /* do rx first for enable and last for disable @@ -4436,14 +4436,14 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) static void i40e_vsi_close(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) + if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) pf->flags |= I40E_FLAG_CLIENT_RESET; } @@ -4453,10 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) **/ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) { - if (test_bit(__I40E_DOWN, &vsi->state)) + if 
(test_bit(__I40E_VSI_DOWN, vsi->state)) return; - set_bit(__I40E_NEEDS_RESTART, &vsi->state); + set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); else @@ -4469,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) **/ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { - if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) + if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) return; - clear_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else @@ -4638,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) return; /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) return; /* Make sure type is MAIN VSI */ @@ -5186,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } @@ -5354,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) if (err) return err; - clear_bit(__I40E_DOWN, &vsi->state); + clear_bit(__I40E_VSI_DOWN, vsi->state); i40e_napi_enable_all(vsi); i40e_vsi_enable_irq(vsi); @@ -5403,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; WARN_ON(in_interrupt()); - while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) usleep_range(1000, 2000); i40e_down(vsi); i40e_up(vsi); - clear_bit(__I40E_CONFIG_BUSY, &pf->state); + clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** @@ -5435,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi) int i; /* It is assumed that the caller of this function - * sets the vsi->state __I40E_DOWN bit. + * sets the vsi->state __I40E_VSI_DOWN bit. 
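The i40e changes in this patch replace the single unsigned long state word, previously tested via &pf->state or &vsi->state, with separate PF and VSI bitmaps whose size comes from a terminating __..._STATE_SIZE__ enum entry; DECLARE_BITMAP expands to an array of unsigned long, which is why the bit helpers now take pf->state and vsi->state directly, without the address-of operator. A minimal sketch of that pattern, using hypothetical names (example_pf, EX_STATE_DOWN) rather than the driver's own:

#include <linux/bitops.h>
#include <linux/types.h>

enum example_state {
        EX_STATE_DOWN,
        EX_STATE_RESET_PENDING,
        /* must be last: it sizes the bitmap */
        EX_STATE_SIZE,
};

struct example_pf {
        DECLARE_BITMAP(state, EX_STATE_SIZE);
};

static bool example_is_down(struct example_pf *pf)
{
        /* state is already an array of unsigned long, so no '&' here */
        return test_bit(EX_STATE_DOWN, pf->state);
}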
*/ if (vsi->netdev) { netif_carrier_off(vsi->netdev); @@ -5541,8 +5540,8 @@ int i40e_open(struct net_device *netdev) int err; /* disallow open during test or if eeprom is broken */ - if (test_bit(__I40E_TESTING, &pf->state) || - test_bit(__I40E_BAD_EEPROM, &pf->state)) + if (test_bit(__I40E_TESTING, pf->state) || + test_bit(__I40E_BAD_EEPROM, pf->state)) return -EBUSY; netif_carrier_off(netdev); @@ -5787,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && - test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { + test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, + vsi->state)) i40e_vsi_reinit_locked(pf->vsi[v]); - clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); - } } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; @@ -5801,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && - test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { - set_bit(__I40E_DOWN, &vsi->state); + test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, + vsi->state)) { + set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); - clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); } } } else { @@ -5944,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, else pf->flags &= ~I40E_FLAG_DCB_ENABLED; - set_bit(__I40E_PORT_SUSPENDED, &pf->state); + set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); @@ -5953,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ret = i40e_resume_port_tx(pf); - clear_bit(__I40E_PORT_SUSPENDED, &pf->state); + clear_bit(__I40E_PORT_SUSPENDED, pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto exit; @@ -5962,7 +5960,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); @@ -6077,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) u32 fcnt_prog, fcnt_avail; struct hlist_node *node; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return; - /* Check if, FD SB or ATR was auto disabled and if there is enough room - * to re-enable - */ + /* Check if we have enough room to re-enable FDir SB capability. */ fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) { - pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } } - /* Wait for some more space to be available to turn on ATR. We also - * must check that no existing ntuple rules for TCP are in effect + /* We should wait for even more space before re-enabling ATR. + * Additionally, we cannot enable ATR as long as we still have TCP SB + * rules active. 
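The flow director hunks around here drop the parallel hw_disabled_flags word in favour of dedicated I40E_FLAG_FD_SB_AUTO_DISABLED and I40E_FLAG_FD_ATR_AUTO_DISABLED bits inside pf->flags, so "is the feature actually usable" becomes two tests on one field. A hedged sketch of that check; the helper name is hypothetical, the flag names are taken from the diff:

static bool ex_fd_sb_usable(struct i40e_pf *pf)
{
        /* enabled by the user and not auto-disabled because the
         * hardware filter table ran out of room
         */
        return (pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
               !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED);
}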
*/ - if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->fd_tcp4_filter_cnt == 0)) { - pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) && + (pf->fd_tcp4_filter_cnt == 0)) { + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); } } @@ -6155,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } pf->fd_flush_timestamp = jiffies; - pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); @@ -6175,8 +6172,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); if (!disable_atr && !pf->fd_tcp4_filter_cnt) - pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } @@ -6206,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state)) return; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); @@ -6223,7 +6220,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { - if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) + if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) return; switch (vsi->type) { @@ -6316,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf) if (new_link == old_link && new_link_speed == old_link_speed && - (test_bit(__I40E_DOWN, &vsi->state) || + (test_bit(__I40E_VSI_DOWN, vsi->state) || new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to @@ -6347,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ @@ -6386,31 +6383,31 @@ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; - if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { + if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_REINIT_REQUESTED); - clear_bit(__I40E_REINIT_REQUESTED, &pf->state); + clear_bit(__I40E_REINIT_REQUESTED, pf->state); } - if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { reset_flags |= 
BIT(__I40E_PF_RESET_REQUESTED); - clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); - clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); - clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= BIT(__I40E_DOWN_REQUESTED); - clear_bit(__I40E_DOWN_REQUESTED, &pf->state); + if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { + reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); + clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. */ - if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { + if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { i40e_prep_for_reset(pf, false); i40e_reset(pf); i40e_rebuild(pf, false, false); @@ -6418,8 +6415,8 @@ static void i40e_reset_subtask(struct i40e_pf *pf) /* If we're already down or resetting, just bail */ if (reset_flags && - !test_bit(__I40E_DOWN, &pf->state) && - !test_bit(__I40E_CONFIG_BUSY, &pf->state)) { + !test_bit(__I40E_VSI_DOWN, pf->state) && + !test_bit(__I40E_CONFIG_BUSY, pf->state)) { rtnl_lock(); i40e_do_reset(pf, reset_flags, true); rtnl_unlock(); @@ -6468,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) u32 val; /* Do not run clean AQ when PF reset fails */ - if (test_bit(__I40E_RESET_FAILED, &pf->state)) + if (test_bit(__I40E_RESET_FAILED, pf->state)) return; /* check for error indications */ @@ -6572,7 +6569,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) } while (i++ < pf->adminq_work_limit); if (i < pf->adminq_work_limit) - clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ val = rd32(hw, I40E_PFINT_ICR0_ENA); @@ -6598,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) if (err) { dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", err); - set_bit(__I40E_BAD_EEPROM, &pf->state); + set_bit(__I40E_BAD_EEPROM, pf->state); } } - if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); - clear_bit(__I40E_BAD_EEPROM, &pf->state); + clear_bit(__I40E_BAD_EEPROM, pf->state); } } @@ -6922,8 +6919,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) i40e_status ret = 0; u32 v; - clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); - if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); + if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; if (i40e_check_asq_alive(&pf->hw)) i40e_vc_notify_reset(pf); @@ -6982,8 +6979,8 @@ static int i40e_reset(struct i40e_pf *pf) ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); - set_bit(__I40E_RESET_FAILED, &pf->state); - clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); + set_bit(__I40E_RESET_FAILED, 
pf->state); + clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); } else { pf->pfr_count++; } @@ -7005,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) u32 val; int v; - if (test_bit(__I40E_DOWN, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); @@ -7019,7 +7016,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); i40e_clear_pxe_mode(hw); @@ -7182,9 +7179,9 @@ end_unlock: if (!lock_acquired) rtnl_unlock(); end_core_reset: - clear_bit(__I40E_RESET_FAILED, &pf->state); + clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: - clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); + clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); } /** @@ -7237,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) + if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ @@ -7289,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } /* Queue belongs to the PF, initiate a reset */ if (pf_mdd_detected) { - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } } @@ -7318,12 +7315,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) "Too many MDD events on VF %d, disabled\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); - set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); @@ -7331,6 +7328,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } /** + * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters + * @pf: board private structure + **/ +static void i40e_sync_udp_filters(struct i40e_pf *pf) +{ + int i; + + /* loop through and set pending bit for all active UDP filters */ + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->udp_ports[i].port) + pf->pending_udp_bitmap |= BIT_ULL(i); + } + + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +} + +/** * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ @@ -7349,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { pf->pending_udp_bitmap &= ~BIT_ULL(i); - port = pf->udp_ports[i].index; + port = pf->udp_ports[i].port; if (port) ret = i40e_aq_add_udp_tunnel(hw, port, pf->udp_ports[i].type, @@ -7366,7 +7380,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->udp_ports[i].index = 0; + pf->udp_ports[i].port = 0; } } } @@ -7384,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work) unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ - if (test_bit(__I40E_RESET_RECOVERY_PENDING, 
&pf->state)) { + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; - } - if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) + if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; i40e_detect_recover_hung(pf); @@ -7416,16 +7429,16 @@ static void i40e_service_task(struct work_struct *work) /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. */ if (time_after(jiffies, (start_time + pf->service_timer_period)) || - test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || - test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || - test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || + test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || + test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) i40e_service_event_schedule(pf); } @@ -7574,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) } vsi->type = type; vsi->back = pf; - set_bit(__I40E_DOWN, &vsi->state); + set_bit(__I40E_VSI_DOWN, vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; vsi->int_rate_limit = 0; @@ -8156,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) /* Only request the irq if this is the first time through, and * not when we're rebuilding after a Reset */ - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { @@ -8808,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } else { - pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } pf->eeprom_version = 0xDEAD; @@ -8870,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_SB_AUTO_DISABLED); /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
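Most of the mechanical churn in these hunks comes from pf->state and vsi->state being converted from a plain unsigned long into a DECLARE_BITMAP() array (the i40evf side of that conversion is visible further down in i40evf.h). An array argument already decays to the unsigned long * that test_bit()/set_bit()/clear_bit() expect, which is why the leading & is dropped at every call site. A self-contained illustration with generic names and non-atomic stand-ins for the kernel helpers:

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum sketch_state { STATE_DOWN, STATE_RESET_PENDING, STATE_SIZE };

struct pf_sketch {
	DECLARE_BITMAP(state, STATE_SIZE);	/* was: unsigned long state; */
};

/* Non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void sketch_set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool sketch_test_bit(int nr, const unsigned long *addr)
{
	return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

static bool pf_is_down(const struct pf_sketch *pf)
{
	/* The array decays to unsigned long *, so no & is needed. */
	return sketch_test_bit(STATE_DOWN, pf->state);
}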
*/ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) { - pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } } @@ -8953,7 +8966,7 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->udp_ports[i].index == port) + if (pf->udp_ports[i].port == port) return i; } @@ -9006,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].port = port; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } @@ -9047,7 +9060,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->udp_ports[idx].index = 0; + pf->udp_ports[idx].port = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; @@ -9701,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } vsi->active_filters = 0; - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { @@ -9754,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, &pf->state)) { + !test_bit(__I40E_VSI_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } @@ -10738,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_ptp_init(pf); + /* repopulate tunnel port filters */ + i40e_sync_udp_filters(pf); + return ret; } @@ -10987,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } pf->next_vsi = 0; pf->pdev = pdev; - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); hw = &pf->hw; hw->back = pf; @@ -11166,7 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); @@ -11204,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } @@ -11277,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * before setting up the misc vector or we get a race and the vector * ends up disabled forever. */ - clear_bit(__I40E_DOWN, &pf->state); + clear_bit(__I40E_VSI_DOWN, pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. 
In case of legacy and MSI @@ -11297,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; @@ -11432,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Unwind what we've done if something failed in the setup */ err_vsis: - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: @@ -11483,8 +11499,8 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); if (pf->service_timer.data) del_timer_sync(&pf->service_timer); if (pf->service_task.func) @@ -11592,7 +11608,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, } /* shutdown all operations */ - if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + if (!test_bit(__I40E_SUSPENDED, pf->state)) { rtnl_lock(); i40e_prep_for_reset(pf, true); rtnl_unlock(); @@ -11659,7 +11675,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) struct i40e_pf *pf = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); - if (test_bit(__I40E_SUSPENDED, &pf->state)) + if (test_bit(__I40E_SUSPENDED, pf->state)) return; rtnl_lock(); @@ -11723,8 +11739,8 @@ static void i40e_shutdown(struct pci_dev *pdev) struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); rtnl_lock(); i40e_prep_for_reset(pf, true); rtnl_unlock(); @@ -11772,8 +11788,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) struct i40e_hw *hw = &pf->hw; int retval = 0; - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); @@ -11824,8 +11840,8 @@ static int i40e_resume(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); /* handling the reset will rebuild the device state */ - if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { - clear_bit(__I40E_DOWN, &pf->state); + if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { + clear_bit(__I40E_VSI_DOWN, pf->state); rtnl_lock(); i40e_reset_and_rebuild(pf, false, true); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 2caee35528fa..18c1cc08da97 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(pf->ptp_tx_skb); pf->ptp_tx_skb = NULL; - clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } /** @@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) if (pf->ptp_tx_skb) { dev_kfree_skb_any(pf->ptp_tx_skb); 
pf->ptp_tx_skb = NULL; - clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } if (pf->ptp_clock) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1531a0f9fcc6..29321a6167a6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -333,15 +333,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); - pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; } else { pf->fd_tcp4_filter_cnt--; - if (pf->fd_tcp4_filter_cnt == 0) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); - pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - } } return 0; @@ -589,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, * progress do nothing, once flush is complete the state will * be cleared. */ - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return; pf->fd_add_err++; @@ -597,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && - (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) { - pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED; - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } /* filter programming failed most likely due to table full */ @@ -611,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, */ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - !(pf->hw_disabled_flags & - I40E_FLAG_FD_SB_ENABLED)) { + !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) { + pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); - pf->hw_disabled_flags |= - I40E_FLAG_FD_SB_ENABLED; } } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { @@ -850,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &vsi->state) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -868,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &vsi->state)) { + !test_bit(__I40E_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -2179,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, } enable_int: - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) wr32(hw, INTREG(vector - 1), txval); if (q_vector->itr_countdown) @@ -2208,7 +2200,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_DOWN, &vsi->state)) { + if 
(test_bit(__I40E_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } @@ -2312,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) return; /* if sampling is disabled do nothing */ @@ -2346,7 +2338,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ - if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) + if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) return; if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { /* HW ATR eviction will take care of removing filters on FIN @@ -2634,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, return 0; if (pf->ptp_tx && - !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { + !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; pf->ptp_tx_skb = skb_get(skb); } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 350cba70490c..95c23fbaa211 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; /* Not all vfs are enabled so skip the ones that are not */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) continue; /* Ignore return value on purpose - a given VF may fail, but @@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) return; /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) return; abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; @@ -812,7 +812,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) /* Start by disabling VF's configuration API to prevent the OS from * accessing the VF's VSI after it's freed / invalidated. */ - clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); + clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); /* free vsi & disconnect it from the parent uplink */ if (vf->lan_vsi_idx) { @@ -884,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) vf->num_queue_pairs = total_queue_pairs; /* VF is now completely initialized */ - set_bit(I40E_VF_STAT_INIT, &vf->vf_states); + set_bit(I40E_VF_STATE_INIT, &vf->vf_states); error_alloc: if (ret) @@ -938,7 +938,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) u32 reg, reg_idx, bit_idx; /* warn the VF */ - clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); /* Disable VF's configuration API during reset. The flag is re-enabled * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 
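Besides renaming the VF bits from I40E_VF_STAT_* to I40E_VF_STATE_*, these hunks add I40E_VF_STATE_PRE_ENABLE: it is set when a VF is first allocated in i40e_alloc_vfs() and consumed with test_and_clear_bit() in i40e_cleanup_reset_vf(), letting the driver tell the initial bring-up reset apart from later resets when deciding whether to notify the client interface. The same test-and-clear consolidation appears earlier for the per-VSI REINIT/DOWN request bits. Below is a compact, non-atomic model of that one-shot bit pattern; the types and names are stand-ins, not the driver's, and the notification polarity is left to the driver code above.

#include <stdbool.h>

enum vf_state { VF_STATE_INIT, VF_STATE_ACTIVE, VF_STATE_PRE_ENABLE };

struct vf_sketch {
	unsigned long vf_states;
};

/* Non-atomic stand-in for test_and_clear_bit(): report whether the bit
 * was set and clear it in one step, so the request fires only once. */
static bool consume_bit(unsigned long *word, int nr)
{
	bool was_set = *word & (1UL << nr);

	*word &= ~(1UL << nr);
	return was_set;
}

/* True exactly once per allocation: the first reset after the VF was
 * created consumes the PRE_ENABLE marker. */
static bool first_reset_after_alloc(struct vf_sketch *vf)
{
	return consume_bit(&vf->vf_states, VF_STATE_PRE_ENABLE);
}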
@@ -946,7 +946,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) * to do it earlier to give some time to finish to any VF config * functions that may still be running at this point. */ - clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); + clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -1004,10 +1004,11 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) if (!i40e_alloc_vf_res(vf)) { int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; i40e_enable_vf_mappings(vf); - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ - if (vf->pf->num_alloc_vfs) + if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } @@ -1035,7 +1036,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) int i; /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) return; i40e_trigger_vf_reset(vf, flr); @@ -1072,7 +1073,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_cleanup_reset_vf(vf); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, &pf->state); + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -1097,7 +1098,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return; /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) return; /* Begin reset on all VFs at once */ @@ -1172,7 +1173,7 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) i40e_cleanup_reset_vf(&pf->vf[v]); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, &pf->state); + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -1189,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf) if (!pf->vf) return; - while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) usleep_range(1000, 2000); i40e_notify_client_of_vf_enable(pf, 0); - for (i = 0; i < pf->num_alloc_vfs; i++) - if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) - i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + + /* Amortize wait time by stopping all VFs at the same time */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) + continue; + + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); + } + + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) + continue; + + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); + } /* Disable IOV before freeing resources. 
This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -1206,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf) else dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); - msleep(20); /* let any messages in transit get finished up */ - /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) i40e_free_vf_res(&pf->vf[i]); /* disable qp mappings */ i40e_disable_vf_mappings(&pf->vf[i]); @@ -1235,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf) wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } - clear_bit(__I40E_VF_DISABLE, &pf->state); + clear_bit(__I40E_VF_DISABLE, pf->state); } #ifdef CONFIG_PCI_IOV @@ -1280,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - /* VF resources get allocated during reset */ - i40e_reset_vf(&vfs[i], false); + + set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; + /* VF resources get allocated during reset */ + i40e_reset_all_vfs(pf, false); + i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); err_alloc: @@ -1312,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; - if (test_bit(__I40E_TESTING, &pf->state)) { + if (test_bit(__I40E_TESTING, pf->state)) { dev_warn(&pdev->dev, "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); err = -EPERM; @@ -1418,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, "Number of invalid messages exceeded for VF %d\n", vf->vf_id); dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } else { vf->num_valid_msgs++; @@ -1493,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) int len = 0; int ret; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -1522,7 +1536,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (i40e_vf_client_capable(pf, vf->vf_id) && (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; - set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); + set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -1583,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); } - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); err: /* send the response back to the VF */ @@ -1606,7 +1620,7 @@ err: **/ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { - if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) i40e_reset_vf(vf, false); } @@ -1654,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, int bkt; vsi = i40e_find_vsi_from_id(pf, info->vsi_id); - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, 
info->vsi_id) || !vsi) { aq_ret = I40E_ERR_PARAM; @@ -1715,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); if (allmulti) - set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); else - clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); } if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) @@ -1766,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); if (alluni) - set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); else - clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); } error_param: @@ -1797,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1854,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) unsigned long tempmap; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1914,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1953,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1995,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2082,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2151,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2217,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); goto error_param; } - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2244,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!ret) vf->num_vlan++; - if 
(test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], NULL); - if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], @@ -2284,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2307,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); vf->num_vlan--; - if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], NULL); - if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], @@ -2338,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2369,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, (struct i40e_virtchnl_iwarp_qvlist_info *)msg; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2407,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vrk->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2439,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vrl->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2469,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2506,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2536,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, 
int valid_len = 0; /* Check if VF is disabled. */ - if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) return I40E_ERR_PARAM; /* Validate message length. */ @@ -2804,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) struct i40e_vf *vf; int vf_id; - if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) return 0; /* Re-enable the VFLR interrupt cause here, before looking for which @@ -2817,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); - clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; @@ -2860,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -2949,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -3081,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -3162,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, vf = &(pf->vf[vf_id]); /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -3281,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", vf_id); ret = -EAGAIN; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9495f1422122..20d7c8160e9e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -56,13 +56,14 @@ enum i40e_queue_ctrl { /* VF states */ enum i40e_vf_states { - I40E_VF_STAT_INIT = 0, - I40E_VF_STAT_ACTIVE, - I40E_VF_STAT_IWARPENA, - I40E_VF_STAT_FCOEENA, - I40E_VF_STAT_DISABLED, - I40E_VF_STAT_MC_PROMISC, - I40E_VF_STAT_UC_PROMISC, + I40E_VF_STATE_INIT = 0, + I40E_VF_STATE_ACTIVE, + I40E_VF_STATE_IWARPENA, + I40E_VF_STATE_FCOEENA, + I40E_VF_STATE_DISABLED, + I40E_VF_STATE_MC_PROMISC, + I40E_VF_STATE_UC_PROMISC, + I40E_VF_STATE_PRE_ENABLE, }; /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 34e96d98251a..dfe241a12ad0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &vsi->state) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -284,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &vsi->state)) { + !test_bit(__I40E_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -1508,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, } enable_int: - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) wr32(hw, INTREG(vector - 1), txval); if (q_vector->itr_countdown) @@ -1537,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_DOWN, &vsi->state)) { + if (test_bit(__I40E_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 40f56e2335df..b8ada6d8d890 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -49,6 +49,13 @@ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "i40evf: " +/* VSI state flags shared with common code */ +enum i40evf_vsi_state_t { + __I40E_VSI_DOWN, + /* This must be last as it determines the size of the BITMAP */ + __I40E_VSI_STATE_SIZE__, +}; + /* dummy struct to make common code less painful */ struct i40e_vsi { struct i40evf_adapter *back; @@ -56,7 +63,7 @@ struct i40e_vsi { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u16 seid; u16 id; - unsigned long state; + DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); int base_vector; u16 work_limit; u16 qs_handle; @@ -168,8 +175,6 @@ enum i40evf_critical_section_t { __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ __I40EVF_IN_CLIENT_TASK, }; -/* make common code happy */ -#define __I40E_DOWN __I40EVF_DOWN /* board specific private data structure */ struct i40evf_adapter { @@ -218,7 +223,6 @@ struct i40evf_adapter { #define I40EVF_FLAG_ALLMULTI_ON BIT(19) #define I40EVF_FLAG_LEGACY_RX BIT(20) /* duplicates for common code */ -#define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_RX_CSUM_ENABLED 
I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5915273c372f..ea110a730e16 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -46,7 +46,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MINOR 1 -#define DRV_VERSION_BUILD 7 +#define DRV_VERSION_BUILD 14 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -497,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &adapter->vsi.state)) + if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) return; for (i = 0; i < q_vectors; i++) @@ -694,13 +694,14 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter) { unsigned int rx_buf_len = I40E_RXBUFFER_2048; - struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; int i; /* Legacy Rx will always default to a 2048 buffer size. */ #if (PAGE_SIZE < 8192) if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + struct net_device *netdev = adapter->netdev; + /* For jumbo frames on systems with 4K pages we have to use * an order 1 page, so we might as well increase the size * of our Rx buffer to make better use of the available space @@ -1087,7 +1088,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) static void i40evf_up_complete(struct i40evf_adapter *adapter) { adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_DOWN, &adapter->vsi.state); + clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); i40evf_napi_enable_all(adapter); @@ -1271,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) } pairs = adapter->num_active_queues; - /* It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) twice the number of vectors as there are CPU's. + /* It's easy to be greedy for MSI-X vectors, but it really doesn't do + * us much good if we have more vectors than CPUs. However, we already + * limit the total number of queues by the number of CPUs so we do not + * need any further limiting here. */ - v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; - v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); + v_budget = min_t(int, pairs + NONQ_VECS, + (int)adapter->vf_res->max_vectors); adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -1508,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) { int err; + err = i40evf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); rtnl_unlock(); @@ -1524,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) goto err_alloc_q_vectors; } - err = i40evf_alloc_queues(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", adapter->num_active_queues); return 0; -err_alloc_queues: - i40evf_free_q_vectors(adapter); err_alloc_q_vectors: i40evf_reset_interrupt_capability(adapter); err_set_interrupt: + i40evf_free_queues(adapter); +err_alloc_queues: return err; } @@ -1753,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; if (netif_running(adapter->netdev)) { - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; @@ -2233,7 +2234,7 @@ static int i40evf_close(struct net_device *netdev) return 0; - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; @@ -2674,7 +2675,7 @@ static void i40evf_init_task(struct work_struct *work) dev_info(&pdev->dev, "GRO is enabled\n"); adapter->state = __I40EVF_DOWN; - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); i40evf_misc_irq_enable(adapter); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 656ca8f69768..76263762bea1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -235,7 +235,11 @@ struct vf_macvlans { struct ixgbe_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; - struct sk_buff *skb; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ + void *data; + }; unsigned int bytecount; unsigned short gso_segs; __be16 protocol; @@ -288,6 +292,7 @@ enum ixgbe_ring_state_t { __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, + __IXGBE_TX_XDP_RING, }; #define ring_uses_build_skb(ring) \ @@ -314,10 +319,17 @@ struct ixgbe_fwd_adapter { set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define clear_ring_rsc_enabled(ring) \ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__IXGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__IXGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state) struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to next ring in q_vector */ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ + struct bpf_prog *xdp_prog; struct device *dev; /* device for DMA mapping */ struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ @@ -379,6 +391,7 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 31 @@ -555,6 +568,7 @@ struct ixgbe_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; /* OS defined structs */ struct net_device *netdev; + struct bpf_prog *xdp_prog; struct pci_dev *pdev; unsigned long state; @@ -621,6 +635,10 @@ struct ixgbe_adapter { __be16 vxlan_port; __be16 geneve_port; + /* XDP */ + int num_xdp_queues; + struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -667,6 +685,7 @@ 
struct ixgbe_adapter { u64 tx_busy; unsigned int tx_ring_count; + unsigned int xdp_ring_count; unsigned int rx_ring_count; u32 link_speed; @@ -750,7 +769,7 @@ struct ixgbe_adapter { u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + u32 *rss_key; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -807,6 +826,7 @@ enum ixgbe_boards { board_X540, board_X550, board_X550EM_x, + board_x550em_x_fw, board_x550em_a, board_x550em_a_fw, }; @@ -816,6 +836,7 @@ extern const struct ixgbe_info ixgbe_82599_info; extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_x_fw_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB @@ -835,7 +856,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter); void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); void ixgbe_reset(struct ixgbe_adapter *adapter); void ixgbe_set_ethtool_ops(struct net_device *netdev); -int ixgbe_setup_rx_resources(struct ixgbe_ring *); +int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); int ixgbe_setup_tx_resources(struct ixgbe_ring *); void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 59730ede4746..7e5e336d7dcc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1071,15 +1071,19 @@ static int ixgbe_set_ringparam(struct net_device *netdev, if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->count = new_rx_count; adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + i = max_t(int, i, adapter->num_xdp_queues); temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); if (!temp_ring) { @@ -1111,12 +1115,33 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } } + for (i = 0; i < adapter->num_xdp_queues; i++) { + memcpy(&temp_ring[i], adapter->xdp_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + for (i = 0; i < adapter->num_tx_queues; i++) { ixgbe_free_tx_resources(adapter->tx_ring[i]); memcpy(adapter->tx_ring[i], &temp_ring[i], sizeof(struct ixgbe_ring)); } + for (i = 0; i < adapter->num_xdp_queues; i++) { + ixgbe_free_tx_resources(adapter->xdp_ring[i]); + + memcpy(adapter->xdp_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } adapter->tx_ring_count = new_tx_count; } @@ -1128,7 +1153,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, sizeof(struct ixgbe_ring)); temp_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_ring[i]); + err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if 
(err) { while (i) { i--; @@ -1761,7 +1786,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; - err = ixgbe_setup_rx_resources(rx_ring); + err = ixgbe_setup_rx_resources(adapter, rx_ring); if (err) { ret_val = 4; goto err_nomem; @@ -2942,9 +2967,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return sizeof(adapter->rss_key); + return IXGBE_RSS_KEY_SIZE; } static u32 ixgbe_rss_indir_size(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 1b8be7d813bd..b45fdc98033d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -267,12 +267,14 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) **/ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { - int i; + int i, reg_idx; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; + for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) + adapter->xdp_ring[i]->reg_idx = reg_idx; return true; } @@ -308,6 +310,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) +{ + return adapter->xdp_prog ? nr_cpu_ids : 0; +} + #define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 @@ -382,6 +389,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = tcs; adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = vmdq_i * tcs; #ifdef IXGBE_FCOE @@ -479,6 +487,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) netdev_set_tc_queue(dev, i, rss_i, rss_i * i); adapter->num_tx_queues = rss_i * tcs; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = rss_i * tcs; return true; @@ -549,6 +558,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues = vmdq_i * rss_i; adapter->num_tx_queues = vmdq_i * rss_i; + adapter->num_xdp_queues = 0; /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -669,6 +679,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ adapter->num_rx_queues = rss_i; adapter->num_tx_queues = rss_i; + adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); return true; } @@ -689,6 +700,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; + adapter->num_xdp_queues = 0; adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; @@ -719,8 +731,11 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i, vectors, vector_threshold; - /* We start by asking for one vector per queue pair */ + /* We start by asking for one vector per queue pair with XDP queues + * being stacked with TX queues. 
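Because the new ixgbe XDP Tx rings share interrupt vectors with the regular queues instead of getting their own, the MSI-X request that follows is the maximum of the three ring counts rather than their sum. A stand-alone restatement of that budget calculation (the non-queue vectors and the later capping against CPUs and hardware limits are omitted here):

/* Vector budget when XDP rings are stacked on the same vectors as the
 * regular Rx/Tx rings: take the largest of the three counts. */
static int queue_vectors_wanted(int rxq, int txq, int xdpq)
{
	int vectors = rxq;

	if (txq > vectors)
		vectors = txq;
	if (xdpq > vectors)
		vectors = xdpq;
	return vectors;
}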
+ */ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + vectors = max(vectors, adapter->num_xdp_queues); /* It is easy to be greedy for MSI-X vectors. However, it really * doesn't do much good if we have a lot more vectors than CPUs. We'll @@ -800,6 +815,8 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, * @v_idx: index of vector in adapter struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate + * @xdp_count: total number of XDP rings to allocate + * @xdp_idx: index of first XDP ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * @@ -808,6 +825,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_count, int v_idx, int txr_count, int txr_idx, + int xdp_count, int xdp_idx, int rxr_count, int rxr_idx) { struct ixgbe_q_vector *q_vector; @@ -817,7 +835,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int ring_count, size; u8 tcs = netdev_get_num_tc(adapter->netdev); - ring_count = txr_count + rxr_count; + ring_count = txr_count + rxr_count + xdp_count; size = sizeof(struct ixgbe_q_vector) + (sizeof(struct ixgbe_ring) * ring_count); @@ -909,6 +927,33 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring++; } + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx++; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; @@ -1002,17 +1047,18 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; - int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int xdp_remaining = adapter->num_xdp_queues; + int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; int err; /* only one q_vector if MSI-X is disabled. 
*/ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) q_vectors = 1; - if (q_vectors >= (rxr_remaining + txr_remaining)) { + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++) { err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - 0, 0, 1, rxr_idx); + 0, 0, 0, 0, 1, rxr_idx); if (err) goto err_out; @@ -1026,8 +1072,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) for (; v_idx < q_vectors; v_idx++) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, + xqpv, xdp_idx, rqpv, rxr_idx); if (err) @@ -1036,14 +1085,17 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) /* update counts and index */ rxr_remaining -= rqpv; txr_remaining -= tqpv; + xdp_remaining -= xqpv; rxr_idx++; txr_idx++; + xdp_idx += xqpv; } return 0; err_out: adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; @@ -1066,6 +1118,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) int v_idx = adapter->num_q_vectors; adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; @@ -1172,9 +1225,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) ixgbe_cache_ring_register(adapter); - e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues); + adapter->num_rx_queues, adapter->num_tx_queues, + adapter->num_xdp_queues); set_bit(__IXGBE_DOWN, &adapter->state); @@ -1195,6 +1249,7 @@ err_alloc_q_vectors: void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) { adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; ixgbe_free_q_vectors(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index afff2ca7f8c0..22a29df1d29e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -49,6 +49,9 @@ #include <linux/if_macvlan.h> #include <linux/if_bridge.h> #include <linux/prefetch.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <linux/atomic.h> #include <scsi/fc/fc_fcoe.h> #include <net/udp_tunnel.h> #include <net/pkt_cls.h> @@ -85,6 +88,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, [board_x550em_a] = &ixgbe_x550em_a_info, [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; @@ -135,6 +139,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, @@ -590,6 +595,19 @@ static void ixgbe_regdump(struct ixgbe_hw 
*hw, struct ixgbe_reg_info *reginfo) } +static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) +{ + struct ixgbe_tx_buffer *tx_buffer; + + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, ring->next_to_use, ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); +} + /* * ixgbe_dump - Print registers, tx-rings and rx-rings */ @@ -599,7 +617,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_reg_info *reginfo; int n = 0; - struct ixgbe_ring *tx_ring; + struct ixgbe_ring *ring; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; @@ -639,14 +657,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) "Queue [NTU] [NTC] [bi(ntc)->dma ]", "leng", "ntw", "timestamp"); for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp); + ring = adapter->tx_ring[n]; + ixgbe_print_buffer(ring, n); + } + + for (n = 0; n < adapter->num_xdp_queues; n++) { + ring = adapter->xdp_ring[n]; + ixgbe_print_buffer(ring, n); } /* Print TX Rings */ @@ -691,28 +708,28 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) */ for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; + ring = adapter->tx_ring[n]; pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); pr_info("------------------------------------\n"); pr_info("%s%s %s %s %s %s\n", "T [desc] [address 63:0 ] ", "[PlPOIdStDDt Ln] [bi->dma ] ", "leng", "ntw", "timestamp", "bi->skb"); - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC(tx_ring, i); - tx_buffer = &tx_ring->tx_buffer_info[i]; + for (i = 0; ring->desc && (i < ring->count); i++) { + tx_desc = IXGBE_TX_DESC(ring, i); + tx_buffer = &ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; if (dma_unmap_len(tx_buffer, len) > 0) { const char *ring_desc; - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) + if (i == ring->next_to_use && + i == ring->next_to_clean) ring_desc = " NTC/U"; - else if (i == tx_ring->next_to_use) + else if (i == ring->next_to_use) ring_desc = " NTU"; - else if (i == tx_ring->next_to_clean) + else if (i == ring->next_to_clean) ring_desc = " NTC"; else ring_desc = ""; @@ -981,6 +998,10 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->tx_ring[i]->state); + + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); } static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) @@ -1025,6 +1046,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + tc = xdp_ring->dcb_tc; + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); 
+ } } static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) @@ -1176,7 +1205,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1237,7 +1269,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ struct ixgbe_hw *hw = &adapter->hw; - e_err(drv, "Detected Tx Unit Hang\n" + e_err(drv, "Detected Tx Unit Hang %s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" @@ -1245,13 +1277,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? "(XDP)" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), tx_ring->next_to_use, i, tx_ring->tx_buffer_info[i].time_stamp, jiffies); - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", @@ -1264,6 +1299,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, return true; } + if (ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@ -1855,6 +1893,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * + * Check if the skb is valid in the XDP case it will be an error pointer. + * Return true in this case to abort processing and advance to next + * descriptor. + * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. 
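Illustrative aside, not part of the patch: the hunks above and below rely on an error-pointer convention to carry the XDP verdict back to the Rx cleanup loop. ixgbe_run_xdp() returns ERR_PTR(-IXGBE_XDP_TX) or ERR_PTR(-IXGBE_XDP_CONSUMED) instead of a real sk_buff (and NULL for pass), and ixgbe_clean_rx_irq()/ixgbe_cleanup_headers() decode that with IS_ERR()/PTR_ERR(). The sketch below only shows that encode/decode pairing; the helper example_handle_verdict() is hypothetical and does not exist in the driver.

#include <linux/err.h>
#include <linux/skbuff.h>

#define IXGBE_XDP_PASS		0
#define IXGBE_XDP_CONSUMED	1
#define IXGBE_XDP_TX		2

/* Hypothetical consumer mirroring the checks ixgbe_clean_rx_irq() performs
 * on the value returned by ixgbe_run_xdp().
 */
static bool example_handle_verdict(struct sk_buff *skb, bool *xdp_xmit)
{
	if (!IS_ERR(skb))
		return false;		/* real skb (or NULL): continue the normal Rx path */

	if (PTR_ERR(skb) == -IXGBE_XDP_TX)
		*xdp_xmit = true;	/* buffer was queued on an XDP Tx ring */

	/* -IXGBE_XDP_CONSUMED: the BPF program dropped the frame */
	return true;			/* abort further processing of this descriptor */
}
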
@@ -1873,6 +1915,10 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, { struct net_device *netdev = rx_ring->netdev; + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && @@ -2048,7 +2094,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { - if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { /* the page has been released from the ring */ IXGBE_CB(skb)->page_released = true; } else { @@ -2069,21 +2115,22 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; /* prefetch first cache line of first page */ - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif /* allocate a skb to store the frags */ @@ -2096,7 +2143,7 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, IXGBE_CB(skb)->dma = rx_buffer->dma; skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset, + xdp->data - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; @@ -2104,7 +2151,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, rx_buffer->page_offset += truesize; #endif } else { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + memcpy(__skb_put(skb, size), + xdp->data, ALIGN(size, sizeof(long))); rx_buffer->pagecnt_bias++; } @@ -2113,32 +2161,32 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; /* prefetch first cache line of first page */ - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif - /* build an skb around the page buffer */ - skb = build_skb(va - IXGBE_SKB_PAD, truesize); + /* build an skb to around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IXGBE_SKB_PAD); - __skb_put(skb, size); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end 
- xdp->data); /* record DMA address if this is the start of a chain of buffers */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) @@ -2154,6 +2202,65 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED 1 +#define IXGBE_XDP_TX 2 + +static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_buff *xdp); + +static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int result = IXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = ixgbe_xmit_xdp_ring(adapter, xdp); + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} + +static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif +} + /** * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information @@ -2172,17 +2279,19 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; -#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter = q_vector->adapter; +#ifdef IXGBE_FCOE int ddp_bytes; unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); + bool xdp_xmit = false; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + struct xdp_buff xdp; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2205,14 +2314,34 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); /* retrieve a buffer from the ring */ - if (skb) + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_hard_start = xdp.data - + ixgbe_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + + skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -IXGBE_XDP_TX) { + xdp_xmit = true; + ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); - else if (ring_uses_build_skb(rx_ring)) + } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbe_build_skb(rx_ring, rx_buffer, - rx_desc, size); - else + &xdp, rx_desc); + } else { skb = ixgbe_construct_skb(rx_ring, rx_buffer, - rx_desc, size); + &xdp, rx_desc); + } /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -2269,6 +2398,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } + if (xdp_xmit) { + struct 
ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + } + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -3373,6 +3512,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); } static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, @@ -3498,6 +3639,28 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter) } /** + * ixgbe_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + +/** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * @@ -3599,7 +3762,7 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) /* Fill out hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - adapter->rss_key[i]); + *(adapter->rss_key + i)); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3660,7 +3823,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -5514,7 +5676,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -5555,7 +5720,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) } /* reset BQL for queue */ - netdev_tx_reset_queue(txring_txq(tx_ring)); + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; @@ -5584,6 +5750,8 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbe_clean_tx_ring(adapter->xdp_ring[i]); } static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) @@ -5678,6 +5846,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) u8 reg_idx = adapter->tx_ring[i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } + for (i = 0; i < adapter->num_xdp_queues; i++) { + u8 reg_idx = adapter->xdp_ring[i]->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } /* Disable the Tx DMA engine on 82599 and later MAC */ switch (hw->mac.type) { @@ -5863,6 +6036,9 @@ static 
int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (!adapter->mac_table) return -ENOMEM; + if (ixgbe_init_rss_key(adapter)) + return -ENOMEM; + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -6048,7 +6224,7 @@ err: **/ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) { - int i, err = 0; + int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); @@ -6058,10 +6234,20 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) e_err(probe, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", j); + goto err_setup_tx; + } return 0; err_setup_tx: /* rewind the index freeing the rings as we go */ + while (j--) + ixgbe_free_tx_resources(adapter->xdp_ring[j]); while (i--) ixgbe_free_tx_resources(adapter->tx_ring[i]); return err; @@ -6073,7 +6259,8 @@ err_setup_tx: * * Returns 0 on success, negative on failure **/ -int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); @@ -6112,6 +6299,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + rx_ring->xdp_prog = adapter->xdp_prog; + return 0; err: vfree(rx_ring->rx_buffer_info); @@ -6135,7 +6324,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; @@ -6191,6 +6380,9 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) ixgbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + if (adapter->xdp_ring[i]->desc) + ixgbe_free_tx_resources(adapter->xdp_ring[i]); } /** @@ -6203,6 +6395,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) { ixgbe_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -6609,6 +6802,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; netdev->stats.tx_bytes = bytes; @@ -6802,6 +7003,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &adapter->xdp_ring[i]->state); /* re-enable flow director interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); } else { @@ -6835,6 +7039,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) if (netif_carrier_ok(adapter->netdev)) { for (i = 
0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); } if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { @@ -7065,6 +7271,13 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) return true; } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + + if (ring->next_to_use != ring->next_to_clean) + return true; + } + return false; } @@ -8022,6 +8235,62 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, #endif } +static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_buff *xdp) +{ + struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + u32 len, cmd_type; + dma_addr_t dma; + u16 i; + + len = xdp->data_end - xdp->data; + + if (unlikely(!ixgbe_desc_unused(ring))) + return IXGBE_XDP_CONSUMED; + + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) + return IXGBE_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = IXGBE_TX_DESC(ring, i); + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->data = xdp->data; + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + + return IXGBE_XDP_TX; +} + netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -8313,6 +8582,23 @@ static void ixgbe_netpoll(struct net_device *netdev) #endif +static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, + struct ixgbe_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } +} + static void ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -8338,18 +8624,13 @@ static void ixgbe_get_stats64(struct net_device *netdev, for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } + ixgbe_get_ring_stats64(stats, ring); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); + + ixgbe_get_ring_stats64(stats, ring); } 
rcu_read_unlock(); @@ -9468,6 +9749,68 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + /* verify ixgbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring_is_rsc_enabled(ring)) + return -EINVAL; + + if (frame_size > ixgbe_rx_bufsz(ring)) + return -EINVAL; + } + + if (nr_cpu_ids > MAX_XDP_QUEUES) + return -ENOMEM; + + old_prog = xchg(&adapter->xdp_prog, prog); + + /* If transitioning XDP modes reconfigure rings */ + if (!!prog != !!old_prog) { + int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); + + if (err) { + rcu_assign_pointer(adapter->xdp_prog, old_prog); + return -EINVAL; + } + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ixgbe_xdp_setup(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!(adapter->xdp_prog); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -9513,6 +9856,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, + .ndo_xdp = ixgbe_xdp, }; /** @@ -9943,6 +10287,9 @@ skip_sriov: if (err) goto err_sw_init; + for (i = 0; i < adapter->num_xdp_queues; i++) + u64_stats_init(&adapter->xdp_ring[i]->syncp); + /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); @@ -10068,6 +10415,7 @@ err_sw_init: iounmap(adapter->io_addr); kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); + kfree(adapter->rss_key); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -10152,6 +10500,7 @@ static void ixgbe_remove(struct pci_dev *pdev) } kfree(adapter->mac_table); + kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 102ca937ddb4..8baf298a8516 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -677,58 +677,6 @@ update_vlvfb: } } -static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - u8 num_tcs = netdev_get_num_tc(adapter->netdev); - - /* remove VLAN filters beloning to this VF */ - ixgbe_clear_vf_vlans(adapter, vf); - - /* add back PF assigned VLAN or VLAN 0 */ - ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); - - /* reset offloads to defaults */ - ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); - - /* set outgoing tags for VFs */ - if 
(!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { - ixgbe_clear_vmvir(adapter, vf); - } else { - if (vfinfo->pf_qos || !num_tcs) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - vfinfo->pf_qos, vf); - else - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - adapter->default_up, vf); - - if (vfinfo->spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - } - - /* reset multicast table array for vf */ - adapter->vfinfo[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); - - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - - /* reset VF api back to unknown */ - adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; -} - -static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - - return 0; -} - static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { @@ -784,6 +732,59 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, return 0; } +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* remove VLAN filters beloning to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + return 0; +} + int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); @@ -1112,7 +1113,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EOPNOTSUPP; } - memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); return 0; } @@ -1346,18 +1347,26 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + + if (vf >= 
adapter->num_vfs) + return -EINVAL; + + if (is_zero_ether_addr(mac)) { + adapter->vfinfo[vf].pf_set_mac = false; + dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { return -EINVAL; - adapter->vfinfo[vf].pf_set_mac = true; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before" - " attempting to use the VF device.\n"); } + return ixgbe_set_vf_mac(adapter, vf, mac); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2f06e4d9208d..9c2460c5ef1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3128,6 +3128,7 @@ enum ixgbe_phy_type { ixgbe_phy_x550em_kx4, ixgbe_phy_x550em_xfi, ixgbe_phy_x550em_ext_t, + ixgbe_phy_ext_1g_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 84a467a8ed3d..6ea0d6a5fb90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; + u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) ixgbe_clear_tx_pending(hw); mac_reset_top: + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 2658394599e4..2ba024b575ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -49,6 +49,18 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) return 0; } +static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -334,9 +346,11 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; /* Fallthrough */ - case 
IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->phy.type = ixgbe_phy_ext_1g_t; + break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: hw->phy.type = ixgbe_phy_fw; @@ -2158,6 +2172,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; @@ -2238,6 +2254,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL; break; + case ixgbe_phy_ext_1g_t: case ixgbe_phy_sgmii: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; @@ -3185,6 +3202,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.setup_link = ixgbe_setup_fw_link; phy->ops.reset = ixgbe_reset_phy_fw; break; + case ixgbe_phy_ext_1g_t: + phy->ops.setup_link = NULL; + phy->ops.read_reg = NULL; + phy->ops.write_reg = NULL; + break; default: break; } @@ -3318,6 +3340,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) u32 ctrl = 0; u32 i; bool link_up = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -3363,9 +3386,16 @@ mac_reset_top: ctrl = IXGBE_CTRL_RST; } + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ @@ -3880,6 +3910,17 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { .write_reg = &ixgbe_write_phy_reg_generic, }; +static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { + X550_COMMON_PHY + .check_overtemp = NULL, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_x550em, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY .check_overtemp = &ixgbe_tn_check_overtemp, @@ -3942,6 +3983,16 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { .link_ops = &link_ops_x550em_x, }; +const struct ixgbe_info ixgbe_x550em_x_fw_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = ixgbe_get_invariants_X550_x_fw, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_x_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, +}; + const struct ixgbe_info ixgbe_x550em_a_info = { .mac = ixgbe_mac_x550em_a, .get_invariants = &ixgbe_get_invariants_X550_a, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 6bf740945260..ff9d05f308ee 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -80,7 +80,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { #define IXGBEVF_QUEUE_STATS_LEN ( \ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ - (sizeof(struct ixgbe_stats) / sizeof(u64))) + 
(sizeof(struct ixgbevf_stats) / sizeof(u64))) #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) #define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) @@ -855,7 +855,8 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { if (key) - memcpy(key, adapter->rss_key, sizeof(adapter->rss_key)); + memcpy(key, adapter->rss_key, + ixgbevf_get_rxfh_key_size(netdev)); if (indir) { int i; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a8cbc2dda0dd..581f44bbd7b3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -319,7 +319,7 @@ struct ixgbevf_adapter { spinlock_t mbx_lock; unsigned long last_reset; - u32 rss_key[IXGBEVF_VFRSSRK_REGS]; + u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 80bab261a0ec..eee29bddddc1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1660,6 +1660,28 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, reg_idx); } +/** + * ixgbevf_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1668,9 +1690,8 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) @@ -2611,6 +2632,12 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) hw->mbx.ops.init_params(hw); + if (hw->mac.type >= ixgbe_mac_X550_vf) { + err = ixgbevf_init_rss_key(adapter); + if (err) + goto out; + } + /* assume legacy case in which PF would only give VF 2 queues */ hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; @@ -4127,6 +4154,7 @@ err_register: err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); + kfree(adapter->rss_key); err_ioremap: disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); @@ -4173,6 +4201,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) hw_dbg(&adapter->hw, "Remove complete\n"); + kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 8a5db9d7219d..b6d0c01eab10 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -333,7 +333,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) switch (hw->api_version) { case 
ixgbe_mbox_api_13: case ixgbe_mbox_api_12: - if (hw->mac.type >= ixgbe_mac_X550_vf) + if (hw->mac.type < ixgbe_mac_X550_vf) break; default: return -EOPNOTSUPP; @@ -399,7 +399,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) switch (hw->api_version) { case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: - if (hw->mac.type >= ixgbe_mac_X550_vf) + if (hw->mac.type < ixgbe_mac_X550_vf) break; default: return -EOPNOTSUPP; @@ -419,7 +419,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) return -EPERM; /* If we didn't get an ACK there must have been diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 8a8f1396de66..c07191cb7631 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -474,6 +474,11 @@ struct qed_hwfn { struct qed_ptt *p_main_ptt; struct qed_ptt *p_dpc_ptt; + /* PTP will be used only by the leading function. + * Usage of all PTP-apis should be synchronized as result. + */ + struct qed_ptt *p_ptp_ptt; + struct qed_sb_sp_info *p_sp_sb; struct qed_sb_attn_info *p_sb_attn; @@ -532,8 +537,6 @@ struct qed_hwfn { struct qed_ptt *p_arfs_ptt; - /* p_ptp_ptt is valid for leading HWFN only */ - struct qed_ptt *p_ptp_ptt; struct qed_simd_fp_handler simd_proto_handler[64]; #ifdef CONFIG_QED_SRIOV @@ -767,6 +770,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_device_num_engines(struct qed_dev *cdev); +int qed_device_get_port_id(struct qed_dev *cdev); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index ea7931b85879..aa1a4d5c864c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2347,9 +2347,6 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) return 0; } -#define QED_RESC_ALLOC_LOCK_RETRY_CNT 10 -#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */ - static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_resc_unlock_params resc_unlock_params; @@ -2366,13 +2363,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. */ - memset(&resc_lock_params, 0, sizeof(resc_lock_params)); - resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC; - resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT; - resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US; - resc_lock_params.sleep_b4_retry = true; - memset(&resc_unlock_params, 0, sizeof(resc_unlock_params)); - resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC; + qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, + QED_RESC_LOCK_RESC_ALLOC, false); rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); if (rc && rc != -EINVAL) { @@ -4072,3 +4064,17 @@ int qed_device_num_engines(struct qed_dev *cdev) { return QED_IS_BB(cdev) ? 
2 : 1; } + +static int qed_device_num_ports(struct qed_dev *cdev) +{ + /* in CMT always only one port */ + if (cdev->num_hwfns > 1) + return 1; + + return cdev->num_ports_in_engines * qed_device_num_engines(cdev); +} + +int qed_device_get_port_id(struct qed_dev *cdev) +{ + return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index a919260b68f2..8a5a0649fc4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -956,13 +956,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, } } #endif - p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); - if (p_ptt) { - QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt; - } else { - DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n"); - goto err; - } } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -1076,9 +1069,6 @@ err: qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); #endif - if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt) - qed_ptt_release(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->p_ptp_ptt); qed_iov_wq_stop(cdev, false); @@ -1098,8 +1088,6 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); #endif - qed_ptt_release(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->p_ptp_ptt); qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ff6080df2246..7266b36a2655 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2615,3 +2615,33 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, return 0; } + +void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, + struct qed_resc_unlock_params *p_unlock, + enum qed_resc_lock + resource, bool b_is_permanent) +{ + if (p_lock) { + memset(p_lock, 0, sizeof(*p_lock)); + + /* Permanent resources don't require aging, and there's no + * point in trying to acquire them more than once since it's + * unexpected another entity would release them. 
+ */ + if (b_is_permanent) { + p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE; + } else { + p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT; + p_lock->retry_interval = + QED_MCP_RESC_LOCK_RETRY_VAL_DFLT; + p_lock->sleep_b4_retry = true; + } + + p_lock->resource = resource; + } + + if (p_unlock) { + memset(p_unlock, 0, sizeof(*p_unlock)); + p_unlock->resource = resource; + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index ac7d406be1ed..5ae35d6cc7d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -795,7 +795,12 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, enum qed_resc_lock { QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL, - QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL + QED_RESC_LOCK_PTP_PORT0, + QED_RESC_LOCK_PTP_PORT1, + QED_RESC_LOCK_PTP_PORT2, + QED_RESC_LOCK_PTP_PORT3, + QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL, + QED_RESC_LOCK_RESC_INVALID }; /** @@ -818,9 +823,11 @@ struct qed_resc_lock_params { /* Number of times to retry locking */ u8 retry_num; +#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10 /* The interval in usec between retries */ u16 retry_interval; +#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000 /* Use sleep or delay between retries */ bool sleep_b4_retry; @@ -872,4 +879,17 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_unlock_params *p_params); +/** + * @brief - default initialization for lock/unlock resource structs + * + * @param p_lock - lock params struct to be initialized; Can be NULL + * @param p_unlock - unlock params struct to be initialized; Can be NULL + * @param resource - the requested resource + * @paral b_is_permanent - disable retries & aging when set + */ +void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, + struct qed_resc_unlock_params *p_unlock, + enum qed_resc_lock + resource, bool b_is_permanent); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 80c9c0b172dd..1871ebfdb793 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -34,7 +34,7 @@ #include "qed_dev_api.h" #include "qed_hw.h" #include "qed_l2.h" -#include "qed_ptp.h" +#include "qed_mcp.h" #include "qed_reg_addr.h" /* 16 nano second time quantas to wait before making a Drift adjustment */ @@ -45,6 +45,82 @@ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) +static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) +{ + switch (qed_device_get_port_id(p_hwfn->cdev)) { + case 0: + return QED_RESC_LOCK_PTP_PORT0; + case 1: + return QED_RESC_LOCK_PTP_PORT1; + case 2: + return QED_RESC_LOCK_PTP_PORT2; + case 3: + return QED_RESC_LOCK_PTP_PORT3; + default: + return QED_RESC_LOCK_RESC_INVALID; + } +} + +static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_lock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true); + + rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms); + if (rc && rc != -EINVAL) { + return rc; + } else if (rc == -EINVAL) { + /* MFW doesn't support resource locking, first PF on the port + * has lock ownership. 
+ */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) + return 0; + + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EBUSY; + } else if (!rc && !params.b_granted) { + DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); + return -EBUSY; + } + + return rc; +} + +static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_unlock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true); + + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms); + if (rc == -EINVAL) { + /* MFW doesn't support locking, first PF has lock ownership */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) { + rc = 0; + } else { + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EINVAL; + } + } else if (rc) { + DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n"); + } + + return rc; +} + /* Read Rx timestamp */ static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) { @@ -248,7 +324,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) static int qed_ptp_hw_enable(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + struct qed_ptt *p_ptt; + int rc; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n"); + return -EBUSY; + } + + p_hwfn->p_ptp_ptt = p_ptt; + + rc = qed_ptp_res_lock(p_hwfn, p_ptt); + if (rc) { + DP_INFO(p_hwfn, + "Couldn't acquire the resource lock, skip ptp enable for this PF\n"); + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + return rc; + } /* Reset PTP event detection rules - will be configured in the IOCTL */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); @@ -305,6 +399,8 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + qed_ptp_res_unlock(p_hwfn, p_ptt); + /* Reset PTP event detection rules */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); @@ -316,6 +412,9 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h deleted file mode 100644 index 63c666d0b739..000000000000 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.h +++ /dev/null @@ -1,47 +0,0 @@ -/* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _QED_PTP_H -#define _QED_PTP_H -#include <linux/types.h> - -int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_ptp_filter_type type); -int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); -int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); -int qed_ptp_read_cc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u64 *cycles); -int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb); -int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); - -#endif diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 292e2dc3f8ae..b9ba23d71c61 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -907,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); /* PTP not supported on VFs */ - if (!is_vf) { - rc = qede_ptp_register_phc(edev); - if (rc) { - DP_NOTICE(edev, "Cannot register PHC\n"); - goto err5; - } - } + if (!is_vf) + qede_ptp_enable(edev, true); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -928,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; -err5: - unregister_netdev(edev->ndev); err4: qede_roce_dev_remove(edev); err3: @@ -980,7 +973,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); - qede_ptp_remove(edev); + qede_ptp_disable(edev); qede_roce_dev_remove(edev); @@ -1877,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_roce_dev_event_close(edev); edev->state = QEDE_STATE_CLOSED; - qede_ptp_stop(edev); - /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); @@ -1987,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, qede_roce_dev_event_open(edev); - qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL)); - edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); - goto out; err4: qede_sync_free_irqs(edev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 2e62dec09bd7..6396363a804e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -206,21 +206,6 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc) return phc_cycles; } -static void qede_ptp_init_cc(struct qede_dev *edev) -{ - struct qede_ptp *ptp; - 
- ptp = edev->ptp; - if (!ptp) - return; - - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; -} - static int qede_ptp_cfg_filters(struct qede_dev *edev) { struct qede_ptp *ptp = edev->ptp; @@ -324,61 +309,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) sizeof(config)) ? -EFAULT : 0; } -/* Called during load, to initialize PTP-related stuff */ -static void qede_ptp_init(struct qede_dev *edev, bool init_tc) -{ - struct qede_ptp *ptp; - int rc; - - ptp = edev->ptp; - if (!ptp) - return; - - spin_lock_init(&ptp->lock); - - /* Configure PTP in HW */ - rc = ptp->ops->enable(edev->cdev); - if (rc) { - DP_ERR(edev, "Stopping PTP initialization\n"); - return; - } - - /* Init work queue for Tx timestamping */ - INIT_WORK(&ptp->work, qede_ptp_task); - - /* Init cyclecounter and timecounter. This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. - */ - if (init_tc) { - qede_ptp_init_cc(edev); - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } - - DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n"); -} - -void qede_ptp_start(struct qede_dev *edev, bool init_tc) -{ - qede_ptp_init(edev, init_tc); - qede_ptp_cfg_filters(edev); -} - -void qede_ptp_remove(struct qede_dev *edev) -{ - struct qede_ptp *ptp; - - ptp = edev->ptp; - if (ptp && ptp->clock) { - ptp_clock_unregister(ptp->clock); - ptp->clock = NULL; - } - - kfree(ptp); - edev->ptp = NULL; -} - int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; @@ -417,8 +347,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) return 0; } -/* Called during unload, to stop PTP-related stuff */ -void qede_ptp_stop(struct qede_dev *edev) +void qede_ptp_disable(struct qede_dev *edev) { struct qede_ptp *ptp; @@ -426,6 +355,11 @@ void qede_ptp_stop(struct qede_dev *edev) if (!ptp) return; + if (ptp->clock) { + ptp_clock_unregister(ptp->clock); + ptp->clock = NULL; + } + /* Cancel PTP work queue. Should be done after the Tx queues are * drained to prevent additional scheduling. */ @@ -439,11 +373,54 @@ void qede_ptp_stop(struct qede_dev *edev) spin_lock_bh(&ptp->lock); ptp->ops->disable(edev->cdev); spin_unlock_bh(&ptp->lock); + + kfree(ptp); + edev->ptp = NULL; } -int qede_ptp_register_phc(struct qede_dev *edev) +static int qede_ptp_init(struct qede_dev *edev, bool init_tc) { struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return -EINVAL; + + spin_lock_init(&ptp->lock); + + /* Configure PTP in HW */ + rc = ptp->ops->enable(edev->cdev); + if (rc) { + DP_INFO(edev, "PTP HW enable failed\n"); + return rc; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&ptp->work, qede_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. 
+ */ + if (init_tc) { + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; + + timecounter_init(&ptp->tc, &ptp->cc, + ktime_to_ns(ktime_get_real())); + } + + return rc; +} + +int qede_ptp_enable(struct qede_dev *edev, bool init_tc) +{ + struct qede_ptp *ptp; + int rc; ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); if (!ptp) { @@ -454,14 +431,19 @@ int qede_ptp_register_phc(struct qede_dev *edev) ptp->edev = edev; ptp->ops = edev->ops->ptp; if (!ptp->ops) { - kfree(ptp); - edev->ptp = NULL; - DP_ERR(edev, "PTP clock registeration failed\n"); - return -EIO; + DP_INFO(edev, "PTP enable failed\n"); + rc = -EIO; + goto err1; } edev->ptp = ptp; + rc = qede_ptp_init(edev, init_tc); + if (rc) + goto err1; + + qede_ptp_cfg_filters(edev); + /* Fill the ptp_clock_info struct and register PTP clock */ ptp->clock_info.owner = THIS_MODULE; snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); @@ -478,13 +460,21 @@ int qede_ptp_register_phc(struct qede_dev *edev) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { - ptp->clock = NULL; - kfree(ptp); - edev->ptp = NULL; + rc = -EINVAL; DP_ERR(edev, "PTP clock registeration failed\n"); + goto err2; } return 0; + +err2: + qede_ptp_disable(edev); + ptp->clock = NULL; +err1: + kfree(ptp); + edev->ptp = NULL; + + return rc; } void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index f328f9bba53a..691a14c4b2c5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -40,10 +40,8 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); -void qede_ptp_start(struct qede_dev *edev, bool init_tc); -void qede_ptp_stop(struct qede_dev *edev); -void qede_ptp_remove(struct qede_dev *edev); -int qede_ptp_register_phc(struct qede_dev *edev); +void qede_ptp_disable(struct qede_dev *edev); +int qede_ptp_enable(struct qede_dev *edev, bool init_tc); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index aa5d30428bba..ceda5861da78 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -877,6 +877,12 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) { int ret; + /* do not allow loopback device to be enslaved to a VRF. + * The vrf device acts as the loopback for the vrf. 
+ */ + if (port_dev == dev_net(dev)->loopback_dev) + return -EOPNOTSUPP; + port_dev->priv_flags |= IFF_L3MDEV_SLAVE; ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); if (ret < 0) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 3448a3ce5919..84b6067ff6e7 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -632,11 +632,11 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, sgi = (info3 >> 7) & 1; status->rate_idx = mcs; - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (sgi) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (bw) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; break; case HTT_RX_VHT: case HTT_RX_VHT_WITH_TXBF: @@ -689,10 +689,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, } status->rate_idx = mcs; - status->vht_nss = nss; + status->nss = nss; if (sgi) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; switch (bw) { /* 20MHZ */ @@ -700,18 +700,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; /* 40MHZ */ case 1: - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; break; /* 80MHZ */ case 2: - status->vht_flag |= RX_VHT_FLAG_80MHZ; + status->bw = RATE_INFO_BW_80; break; case 3: - status->vht_flag |= RX_VHT_FLAG_160MHZ; + status->bw = RATE_INFO_BW_160; break; } - status->flag |= RX_FLAG_VHT; + status->encoding = RX_ENC_VHT; break; default: break; @@ -874,13 +874,10 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, /* New PPDU starts so clear out the old per-PPDU status. */ status->freq = 0; status->rate_idx = 0; - status->vht_nss = 0; - status->vht_flag &= ~RX_VHT_FLAG_80MHZ; - status->flag &= ~(RX_FLAG_HT | - RX_FLAG_VHT | - RX_FLAG_SHORT_GI | - RX_FLAG_40MHZ | - RX_FLAG_MACTIME_END); + status->nss = 0; + status->encoding = RX_ENC_LEGACY; + status->bw = RATE_INFO_BW_20; + status->flag &= ~RX_FLAG_MACTIME_END; status->flag |= RX_FLAG_NO_SIGNAL_VAL; ath10k_htt_rx_h_signal(ar, status, rxd); @@ -933,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar, *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), @@ -941,16 +938,15 @@ static void ath10k_process_rx(struct ath10k *ar, is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? - "legacy" : "", - status->flag & RX_FLAG_HT ? "ht" : "", - status->flag & RX_FLAG_VHT ? "vht" : "", - status->flag & RX_FLAG_40MHZ ? "40" : "", - status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "", - status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "", - status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", + (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", + (status->encoding == RX_ENC_HT) ? "ht" : "", + (status->encoding == RX_ENC_VHT) ? "vht" : "", + (status->bw == RATE_INFO_BW_40) ? "40" : "", + (status->bw == RATE_INFO_BW_80) ? "80" : "", + (status->bw == RATE_INFO_BW_160) ? "160" : "", + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", status->rate_idx, - status->vht_nss, + status->nss, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 92ece64fd455..527afcf39246 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1414,10 +1414,10 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, rxs->flag |= ath5k_rx_decrypted(ah, skb, rs); switch (ah->ah_bwmode) { case AR5K_BWMODE_5MHZ: - rxs->flag |= RX_FLAG_5MHZ; + rxs->bw = RATE_INFO_BW_5; break; case AR5K_BWMODE_10MHZ: - rxs->flag |= RX_FLAG_10MHZ; + rxs->bw = RATE_INFO_BW_10; break; default: break; @@ -1425,7 +1425,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, if (rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) - rxs->flag |= RX_FLAG_SHORTPRE; + rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE; trace_ath5k_rx(ah, skb); diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 0c118b7c362c..414b5b596efc 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -169,7 +169,7 @@ static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif) if (!stopped) return; - cfg80211_sched_scan_stopped(ar->wiphy); + cfg80211_sched_scan_stopped(ar->wiphy, 0); } static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, @@ -806,9 +806,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, WLAN_STATUS_SUCCESS, GFP_KERNEL); cfg80211_put_bss(ar->wiphy, bss); } else if (vif->sme_state == SME_CONNECTED) { + struct cfg80211_roam_info roam_info = { + .bss = bss, + .req_ie = assoc_req_ie, + .req_ie_len = assoc_req_len, + .resp_ie = assoc_resp_ie, + .resp_ie_len = assoc_resp_len, + }; /* inform roam event to cfg80211 */ - cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len, - assoc_resp_ie, assoc_resp_len, GFP_KERNEL); + cfg80211_roamed(vif->ndev, &roam_info, GFP_KERNEL); } } @@ -3352,7 +3358,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, } static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, - struct net_device *dev) + struct net_device *dev, u64 reqid) { struct ath6kl_vif *vif = netdev_priv(dev); bool stopped; @@ -3973,7 +3979,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities)) - ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + ar->wiphy->max_sched_scan_reqs = 1; if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, ar->fw_capabilities)) diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index a082de81ec4c..bfc20b45b806 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -1082,7 +1082,7 @@ void ath6kl_wmi_sscan_timer(unsigned long ptr) { struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr; - cfg80211_sched_scan_results(vif->ar->wiphy); + cfg80211_sched_scan_results(vif->ar->wiphy, 0); } static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index cc5bb0a76baf..68fcbe03bce2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -494,7 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, 
rxs->rs_status = 0; rxs->rs_flags = 0; - rxs->flag = 0; + rxs->enc_flags = 0; + rxs->bw = RATE_INFO_BW_20; rxs->rs_datalen = rxsp->status2 & AR_DataLen; rxs->rs_tstamp = rxsp->status3; @@ -520,8 +521,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); - rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0; - rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0; + rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; + rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index b80e08b13b74..c67d0e08bd4c 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -181,14 +181,15 @@ int ath9k_cmn_process_rate(struct ath_common *common, sband = hw->wiphy->bands[band]; if (IS_CHAN_QUARTER_RATE(ah->curchan)) - rxs->flag |= RX_FLAG_5MHZ; + rxs->bw = RATE_INFO_BW_5; else if (IS_CHAN_HALF_RATE(ah->curchan)) - rxs->flag |= RX_FLAG_10MHZ; + rxs->bw = RATE_INFO_BW_10; if (rx_stats->rs_rate & 0x80) { /* HT rate */ - rxs->flag |= RX_FLAG_HT; - rxs->flag |= rx_stats->flag; + rxs->encoding = RX_ENC_HT; + rxs->enc_flags |= rx_stats->enc_flags; + rxs->bw = rx_stats->bw; rxs->rate_idx = rx_stats->rs_rate & 0x7f; return 0; } @@ -199,7 +200,7 @@ int ath9k_cmn_process_rate(struct ath_common *common, return 0; } if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { - rxs->flag |= RX_FLAG_SHORTPRE; + rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE; rxs->rate_idx = i; return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index 524cbf13ca9c..efc692ee67d4 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -116,12 +116,12 @@ void ath_debug_rate_stats(struct ath_softc *sc, if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats)) goto exit; - if (rxs->flag & RX_FLAG_40MHZ) + if ((rxs->bw == RATE_INFO_BW_40)) rstats->ht_stats[rxs->rate_idx].ht40_cnt++; else rstats->ht_stats[rxs->rate_idx].ht20_cnt++; - if (rxs->flag & RX_FLAG_SHORT_GI) + if (rxs->enc_flags & RX_ENC_FLAG_SHORT_GI) rstats->ht_stats[rxs->rate_idx].sgi_cnt++; else rstats->ht_stats[rxs->rate_idx].lgi_cnt++; @@ -130,7 +130,7 @@ void ath_debug_rate_stats(struct ath_softc *sc, } if (IS_CCK_RATE(rs->rs_rate)) { - if (rxs->flag & RX_FLAG_SHORTPRE) + if (rxs->enc_flags & RX_ENC_FLAG_SHORTPRE) rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++; else rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index f333ef1e3e7b..b38a586ea59a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -929,11 +929,12 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv) static inline void convert_htc_flag(struct ath_rx_status *rx_stats, struct ath_htc_rx_status *rxstatus) { - rx_stats->flag = 0; + rx_stats->enc_flags = 0; + rx_stats->bw = RATE_INFO_BW_20; if (rxstatus->rs_flags & ATH9K_RX_2040) - rx_stats->flag |= RX_FLAG_40MHZ; + rx_stats->bw = RATE_INFO_BW_40; if (rxstatus->rs_flags & ATH9K_RX_GI) - rx_stats->flag |= RX_FLAG_SHORT_GI; + rx_stats->enc_flags |= RX_ENC_FLAG_SHORT_GI; 
} static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index d937c39b3a0b..6128c2bb23d8 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -535,7 +535,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_status = 0; rs->rs_flags = 0; - rs->flag = 0; + rs->enc_flags = 0; + rs->bw = RATE_INFO_BW_20; rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; rs->rs_tstamp = ads.AR_RcvTimestamp; @@ -577,15 +578,15 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); /* directly mapped flags for ieee80211_rx_status */ - rs->flag |= - (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0; - rs->flag |= - (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0; + rs->enc_flags |= + (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; + rs->enc_flags |= + (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; if (AR_SREV_9280_20_OR_LATER(ah)) - rs->flag |= + rs->enc_flags |= (ads.ds_rxstatus3 & AR_STBC) ? /* we can only Nss=1 STBC */ - (1 << RX_FLAG_STBC_SHIFT) : 0; + (1 << RX_ENC_FLAG_STBC_SHIFT) : 0; if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 770fc11b41d1..fd6aa49adadf 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -16,6 +16,7 @@ #ifndef MAC_H #define MAC_H +#include <net/cfg80211.h> #define set11nTries(_series, _index) \ (SM((_series)[_index].Tries, AR_XmitDataTries##_index)) @@ -143,7 +144,8 @@ struct ath_rx_status { u32 evm2; u32 evm3; u32 evm4; - u32 flag; /* see enum mac80211_rx_flags */ + u16 enc_flags; + enum rate_info_bw bw; }; struct ath_htc_rx_status { diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index d79837fe333f..2197aee2bb72 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1037,11 +1037,11 @@ static void ath_rx_count_airtime(struct ath_softc *sc, rxs = IEEE80211_SKB_RXCB(skb); - is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI); - is_40 = !!(rxs->flag & RX_FLAG_40MHZ); - is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE); + is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI); + is_40 = !!(rxs->bw == RATE_INFO_BW_40); + is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE); - if (!!(rxs->flag & RX_FLAG_HT)) { + if (!!(rxs->encoding == RX_ENC_HT)) { /* MCS rates */ airtime += ath_pkt_duration(sc, rxs->rate_idx, len, diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 0c34c8729dc6..b2166726b05d 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -358,7 +358,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar, switch (mac->status & AR9170_RX_STATUS_MODULATION) { case AR9170_RX_STATUS_MODULATION_CCK: if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE) - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; switch (head->plcp[0]) { case AR9170_RX_PHY_RATE_CCK_1M: status->rate_idx = 0; @@ -423,12 +423,12 @@ static int carl9170_rx_mac_status(struct ar9170 *ar, case AR9170_RX_STATUS_MODULATION_HT: if (head->plcp[3] & 0x80) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; if (head->plcp[6] & 0x80) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 
status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f); - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; break; default: diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 8c387a0a3c09..22304edc5948 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -68,7 +68,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) RX_FLAG_MMIC_STRIPPED | RX_FLAG_DECRYPTED; - wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag); + wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index b068d5aeee24..1b9c191e2a22 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -694,7 +694,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) if (unlikely(phystat0 & (B43_RX_PHYST0_PLCPHCF | B43_RX_PHYST0_PLCPFV))) status.flag |= RX_FLAG_FAILED_PLCP_CRC; if (phystat0 & B43_RX_PHYST0_SHORTPRMBL) - status.flag |= RX_FLAG_SHORTPRE; + status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (macstat & B43_RX_MAC_DECERR) { /* Decryption with the given key failed. * Drop the packet. We also won't be able to decrypt it with diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 8c7f1ef288c6..cd1d6730eab7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -762,7 +762,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, brcmf_dbg(SCAN, "scheduled scan completed\n"); cfg->internal_escan = false; if (!aborted) - cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); + cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0); } else if (scan_request) { struct cfg80211_scan_info info = { .aborted = aborted, @@ -3372,7 +3372,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, goto free_req; out_err: - cfg80211_sched_scan_stopped(wiphy); + cfg80211_sched_scan_stopped(wiphy, 0); free_req: kfree(request); return err; @@ -3405,7 +3405,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, } static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy, - struct net_device *ndev) + struct net_device *ndev, u64 reqid) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); @@ -3607,7 +3607,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) cfg->wowl.pre_pmmode); cfg->wowl.active = false; if (cfg->wowl.nd_enabled) { - brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev); + brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev, 0); brcmf_fweh_unregister(cfg->pub, BRCMF_E_PFN_NET_FOUND); brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, brcmf_notify_sched_scan_results); @@ -3691,7 +3691,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, /* Stop scheduled scan */ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) - brcmf_cfg80211_sched_scan_stop(wiphy, ndev); + brcmf_cfg80211_sched_scan_stop(wiphy, ndev, 0); /* end any scanning */ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) @@ -5359,6 +5359,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, struct ieee80211_supported_band *band; struct brcmf_bss_info_le *bi; struct brcmu_chan ch; + struct cfg80211_roam_info roam_info = {}; u32 freq; s32 err = 0; u8 *buf; @@ -5397,9 
+5398,15 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, done: kfree(buf); - cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid, - conn_info->req_ie, conn_info->req_ie_len, - conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); + + roam_info.channel = notify_channel; + roam_info.bssid = profile->bssid; + roam_info.req_ie = conn_info->req_ie; + roam_info.req_ie_len = conn_info->req_ie_len; + roam_info.resp_ie = conn_info->resp_ie; + roam_info.resp_ie_len = conn_info->resp_ie_len; + + cfg80211_roamed(ndev, &roam_info, GFP_KERNEL); brcmf_dbg(CONN, "Report roaming result\n"); set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); @@ -6374,11 +6381,11 @@ err: static void brcmf_wiphy_pno_params(struct wiphy *wiphy) { /* scheduled scan settings */ + wiphy->max_sched_scan_reqs = 1; wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD; - wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; } #ifdef CONFIG_PM diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 24118ce72b4f..a3d82368f1a9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -197,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, int ret; struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - struct ethhdr *eh = (struct ethhdr *)(skb->data); + struct ethhdr *eh; brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); @@ -210,22 +210,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } - /* Make sure there's enough room for any header */ - if (skb_headroom(skb) < drvr->hdrlen) { - struct sk_buff *skb2; - - brcmf_dbg(INFO, "%s: insufficient headroom\n", + /* Make sure there's enough writable headroom*/ + ret = skb_cow_head(skb, drvr->hdrlen); + if (ret < 0) { + brcmf_err("%s: skb_cow_head failed\n", brcmf_ifname(ifp)); - drvr->bus_if->tx_realloc++; - skb2 = skb_realloc_headroom(skb, drvr->hdrlen); dev_kfree_skb(skb); - skb = skb2; - if (skb == NULL) { - brcmf_err("%s: skb_realloc_headroom failed\n", - brcmf_ifname(ifp)); - ret = -ENOMEM; - goto done; - } + goto done; } /* validate length for ether packet */ @@ -235,6 +226,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } + eh = (struct ethhdr *)(skb->data); + if (eh->h_proto == htons(ETH_P_PAE)) atomic_inc(&ifp->pend_8021x_cnt); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index c2a938b59044..0a14942b8216 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -7092,9 +7092,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, rspec = brcms_c_compute_rspec(rxh, plcp); if (is_mcs_rate(rspec)) { rx_status->rate_idx = rspec & RSPEC_RATE_MASK; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; if (rspec_is40mhz(rspec)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; } else { switch (rspec2rate(rspec)) { case BRCM_RATE_1M: @@ -7149,9 +7149,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, /* Determine short preamble and rate_idx */ if (is_cck_rate(rspec)) { if (rxh->PhyRxStatus_0 & 
PRXS0_SHORTH) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } else if (is_ofdm_rate(rspec)) { - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } else { brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n", __func__); @@ -7159,7 +7159,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, } if (plcp3_issgi(plcp[3])) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rxh->RxStatus1 & RXS_DECERR) { rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 4db327a95414..080ea8155b90 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -570,7 +570,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) /* set the preamble flag if appropriate */ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if ((unlikely(rx_stats->phy_count > 20))) { D_DROP("dsp size out of range [0,20]: %d\n", diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 7eda525e3f4f..5d5faa3cad24 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -728,15 +728,15 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) /* set the preamble flag if appropriate */ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; /* Set up the HT phy flags */ if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { /* We know which subframes of an A-MPDU belong diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 6c2d6da7eec6..74e52f7c5aa1 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) goto done; } IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); + iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff); done: ieee80211_wake_queues(priv->hw); mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index e3cab60ddf0f..444c74371929 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -1145,7 +1145,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); + iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues); done: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index dfa2041cfdac..1ee1ba9931a7 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -873,7 +873,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* set the preamble flag if appropriate */ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { /* @@ -887,13 +887,13 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* Set up the HT phy flags */ if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_GF_MSK) - rx_status.flag |= RX_FLAG_HT_GF; + rx_status.enc_flags |= RX_ENC_FLAG_HT_GF; iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, rxb, &rx_status); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index aeefd42d23ad..3b3e076571d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 30 -#define IWL3168_UCODE_API_MAX 30 +#define IWL7265D_UCODE_API_MAX 29 +#define IWL3168_UCODE_API_MAX 29 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c index 097cb45c8ad9..c648cfb981a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -85,7 +85,7 @@ static const struct iwl_base_params iwl_a000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000, - .num_of_queues = 31, + .num_of_queues = 512, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 4af1267181a9..a12197e3ce78 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -170,7 +170,7 @@ struct iwl_base_params { apmg_wake_up_wa:1, scd_chain_ext_wa:1; - u8 num_of_queues; /* def: HW dependent */ + u16 num_of_queues; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 212fb8d5c064..5cfacb0bca84 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1282,7 +1282,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) - return; + goto out_free_fw; if (!ucode_raw) goto try_again; @@ -1494,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * or hangs loading. 
*/ if (load_module) { - err = request_module("%s", op->name); + request_module("%s", op->name); #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, @@ -1512,17 +1512,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto free; out_free_fw: - IWL_ERR(drv, "failed to allocate pci memory\n"); iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: - for (i = 0; i < ARRAY_SIZE(pieces->img); i++) - kfree(pieces->img[i].sec); - kfree(pieces->dbg_mem_tlv); - kfree(pieces); + if (pieces) { + for (i = 0; i < ARRAY_SIZE(pieces->img); i++) + kfree(pieces->img[i].sec); + kfree(pieces->dbg_mem_tlv); + kfree(pieces); + } } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 287e83eb30d9..44419e82da1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -243,6 +243,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * scan request. * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of * ADD_MODIFY_STA_KEY_API_S_VER_2. + * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -253,6 +254,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28, IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29, + IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 0f893ae6e715..9c8b09cf1f7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -246,6 +246,9 @@ void iwl_force_nmi(struct iwl_trans *trans) DEVICE_SET_NMI_VAL_DRV); iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_HW); + } else if (trans->cfg->gen2) { + iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, + DEVICE_SET_NMI_8000_VAL); } else { iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, DEVICE_SET_NMI_8000_VAL); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 3bd6fc1b76d4..721ae6bef5da 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -438,25 +439,16 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } -static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *ch_section, - u8 tx_chains, u8 rx_chains, bool lar_supported) +void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, + u8 tx_chains, u8 rx_chains, bool lar_supported) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS], lar_supported); - else - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS_FAMILY_8000], - lar_supported); - + n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, + lar_supported); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -482,6 +474,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } +IWL_EXPORT_SYMBOL(iwl_init_sbands); static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) @@ -559,8 +552,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) dest[5] = hw_addr[0]; } -static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data) +void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); @@ -578,6 +571,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } +IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr); static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, @@ -718,7 +712,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; - ch_section = nvm_sw; + ch_section = &nvm_sw[NVM_CHANNELS]; } else { u16 lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_FAMILY_8000_OLD : @@ -728,7 +722,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED_FAMILY_8000); lar_enabled = data->lar_enabled; - ch_section = regulatory; + ch_section = &regulatory[NVM_CHANNELS_FAMILY_8000]; } /* If no valid mac address was found - bail out */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 7249e5b403f4..3fd6506a02ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -82,6 +83,19 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); /** + * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on + */ +void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data); + +/** + * iwl_init_sbands - parse and set all channel profiles + */ +void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, + u8 tx_chains, u8 rx_chains, bool lar_supported); + +/** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index f832e58e0ef9..306bc967742e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -114,6 +114,7 @@ #define DEVICE_SET_NMI_VAL_DRV BIT(7) #define DEVICE_SET_NMI_8000_REG 0x00a01c24 #define DEVICE_SET_NMI_8000_VAL 0x1000000 +#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 626e2703a57f..0ebfdbb22992 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -396,6 +396,8 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) * currently supports */ #define IWL_MAX_HW_QUEUES 32 +#define IWL_MAX_TVQM_QUEUES 512 + #define IWL_MAX_TID_COUNT 8 #define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 @@ -689,7 +691,7 @@ struct iwl_trans_ops { void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, bool shared); - int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); + int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm); void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, bool freeze); void (*block_txq_ptrs)(struct iwl_trans *trans, bool block); @@ -1193,15 +1195,15 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, trans->ops->block_txq_ptrs(trans, block); } -static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, - u32 txqs) +static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, + u32 txqs) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } - return trans->ops->wait_tx_queue_empty(trans, txqs); + return trans->ops->wait_tx_queues_empty(trans, txqs); } static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 2e0ed080457f..75d35f6b041e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -86,6 +86,8 @@ 
static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, u32 status; int size; + memset(&cmd, 0, sizeof(cmd)); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { size = sizeof(cmd); @@ -98,8 +100,6 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, size = IWL_BINDING_CMD_SIZE_V1; } - memset(&cmd, 0, sizeof(cmd)); - cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); cmd.action = cpu_to_le32(action); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index d3cdd889c85c..970b030ed28d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -157,7 +157,8 @@ enum iwl_tsf_id { * @bi_reciprocal: 2^32 / bi * @dtim_interval: dtim transmit time in TU * @dtim_reciprocal: 2^32 / dtim_interval - * @mcast_qid: queue ID for multicast traffic + * @mcast_qid: queue ID for multicast traffic. + * NOTE: obsolete from VER2 and on * @beacon_template: beacon template ID */ struct iwl_mac_data_ap { @@ -169,7 +170,7 @@ struct iwl_mac_data_ap { __le32 dtim_reciprocal; __le32 mcast_qid; __le32 beacon_template; -} __packed; /* AP_MAC_DATA_API_S_VER_1 */ +} __packed; /* AP_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_ibss - configuration data for IBSS MAC context diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h index ad9cc03e16c4..1b7d265ffb0a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -227,6 +229,9 @@ enum { */ #define RATE_LEGACY_RATE_MSK 0xff +/* Bit 10 - OFDM HE */ +#define RATE_MCS_OFDM_HE_POS 10 +#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz @@ -255,18 +260,29 @@ enum { #define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK #define RATE_MCS_ANT_NUM 3 -/* Bit 17-18: (0) SS, (1) SS*2 */ +/* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 -#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS) -#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS) +#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) + +/* Bit 18: OFDM-HE dual carrier mode */ +#define RATE_HE_DUAL_CARRIER_MODE 18 +#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE) /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ #define RATE_MCS_BF_POS 19 #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) -/* Bit 20: (0) ZLF is off, (1) ZLF is on */ -#define RATE_MCS_ZLF_POS 20 -#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS) +/* + * Bit 20-21: HE guard interval and LTF type. + * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, + * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us + */ +#define RATE_MCS_HE_GI_LTF_POS 20 +#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) + +/* Bit 22-23: HE type. 
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ +#define RATE_MCS_HE_TYPE_POS 22 +#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ #define RATE_MCS_DUP_POS 24 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index e79df1c53d68..421b9dd1fb66 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -214,20 +214,6 @@ enum iwl_sta_sleep_flag { STA_SLEEP_STATE_MOREDATA = BIT(2), }; -/* STA ID and color bits definitions */ -#define STA_ID_SEED (0x0f) -#define STA_ID_POS (0) -#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) - -#define STA_COLOR_SEED (0x7) -#define STA_COLOR_POS (4) -#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) - -#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \ - (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) -#define STA_ID_N_COLOR_GET_ID(id_n_color) \ - (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) - #define STA_KEY_MAX_NUM (16) #define STA_KEY_IDX_INVALID (0xff) #define STA_KEY_MAX_DATA_KEY_NUM (4) @@ -324,6 +310,24 @@ struct iwl_mvm_add_sta_cmd_v7 { } __packed; /* ADD_STA_CMD_API_S_VER_7 */ /** + * enum iwl_sta_type - FW station types + * ( REPLY_ADD_STA = 0x18 ) + * @IWL_STA_LINK: Link station - normal RX and TX traffic. + * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons + * and probe responses. + * @IWL_STA_MULTICAST: multicast traffic, + * @IWL_STA_TDLS_LINK: TDLS link station + * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on). + */ +enum iwl_sta_type { + IWL_STA_LINK, + IWL_STA_GENERAL_PURPOSE, + IWL_STA_MULTICAST, + IWL_STA_TDLS_LINK, + IWL_STA_AUX_ACTIVITY, +}; + +/** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) * @add_modify: 1: modify existing, 0: add new station @@ -347,6 +351,7 @@ struct iwl_mvm_add_sta_cmd_v7 { * @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. + * @station_type: type of this station. See &enum iwl_sta_type. * @sleep_state_flags: Look at %iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. 
@@ -381,14 +386,15 @@ struct iwl_mvm_add_sta_cmd { u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; - __le16 sleep_state_flags; + u8 sleep_state_flags; + u8 station_type; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; __le16 rx_ba_window; u8 sp_length; u8 uapsd_acs; -} __packed; /* ADD_STA_CMD_API_S_VER_9 */ +} __packed; /* ADD_STA_CMD_API_S_VER_10 */ /** * struct iwl_mvm_add_sta_key_common - add/modify sta key common part diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 900f1e25b9da..e6c9528eeeda 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -698,6 +698,82 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return 0; } +static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait init_wait; + struct iwl_nvm_access_complete_cmd nvm_complete = {}; + struct iwl_init_extended_cfg_cmd init_cfg = { + .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), + }; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, + &init_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_init_complete, + NULL); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + /* Send init config command to mark that we are sending NVM access + * commands + */ + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, + INIT_EXTENDED_CFG_CMD), 0, + sizeof(init_cfg), &init_cfg); + if (ret) { + IWL_ERR(mvm, "Failed to run init config command: %d\n", + ret); + goto error; + } + + /* Read the NVM only at driver load time, no need to do this twice */ + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm, true); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + /* In case we read the NVM from external file, load it to the NIC */ + if (mvm->nvm_file_name) + iwl_mvm_load_nvm_to_nic(mvm); + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + if (WARN_ON(ret)) + goto error; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, + NVM_ACCESS_COMPLETE), 0, + sizeof(nvm_complete), &nvm_complete); + if (ret) { + IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", + ret); + goto error; + } + + /* We wait for the INIT complete notification */ + return iwl_wait_notification(&mvm->notif_wait, &init_wait, + MVM_UCODE_ALIVE_TIMEOUT); + +error: + iwl_remove_notification(&mvm->notif_wait, &init_wait); + return ret; +} + static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd phy_cfg_cmd; @@ -726,6 +802,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_run_unified_mvm_ucode(mvm, true); + lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->calibrating)) @@ -832,82 +911,6 @@ out: return ret; } -int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) -{ - struct iwl_notification_wait init_wait; - struct iwl_nvm_access_complete_cmd nvm_complete = {}; - struct iwl_init_extended_cfg_cmd init_cfg = { - .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), - }; - static const u16 init_complete[] = { - INIT_COMPLETE_NOTIF, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - 
iwl_init_notification_wait(&mvm->notif_wait, - &init_wait, - init_complete, - ARRAY_SIZE(init_complete), - iwl_wait_init_complete, - NULL); - - /* Will also start the device */ - ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - /* Send init config command to mark that we are sending NVM access - * commands - */ - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, - INIT_EXTENDED_CFG_CMD), 0, - sizeof(init_cfg), &init_cfg); - if (ret) { - IWL_ERR(mvm, "Failed to run init config command: %d\n", - ret); - goto error; - } - - /* Read the NVM only at driver load time, no need to do this twice */ - if (read_nvm) { - /* Read nvm */ - ret = iwl_nvm_init(mvm, true); - if (ret) { - IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; - } - } - - /* In case we read the NVM from external file, load it to the NIC */ - if (mvm->nvm_file_name) - iwl_mvm_load_nvm_to_nic(mvm); - - ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); - if (WARN_ON(ret)) - goto error; - - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, - NVM_ACCESS_COMPLETE), 0, - sizeof(nvm_complete), &nvm_complete); - if (ret) { - IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", - ret); - goto error; - } - - /* We wait for the INIT complete notification */ - return iwl_wait_notification(&mvm->notif_wait, &init_wait, - MVM_UCODE_ALIVE_TIMEOUT); - -error: - iwl_remove_notification(&mvm->notif_wait, &init_wait); - return ret; -} - static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1198,6 +1201,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; + /* in case of BIOS bug */ + if (n_profiles <= 0) { + ret = -EINVAL; + goto out_free; + } + for (i = 0; i < n_profiles; i++) { /* the tables start at element 3 */ static int pos = 3; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 9e69b9d2012c..0f1831b41915 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -467,6 +467,11 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, queue = IWL_MVM_DQA_GCAST_QUEUE; } + /* + * For TVQM this will be overwritten later with the FW assigned + * queue value (when queue is enabled). 
+ */ + mvmvif->cab_queue = queue; vif->cab_queue = queue; } else { vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; @@ -902,7 +907,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, /* Allocate sniffer station */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, - vif->type); + vif->type, IWL_STA_GENERAL_PURPOSE); if (ret) return ret; @@ -1223,7 +1228,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period)); - ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_STA_TYPE)) + ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); /* * Only set the beacon time when the MAC is being added, when we diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5cdd95775ba6..a67aa1f5a51c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -620,7 +620,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; /* we create the 802.11 header and zero length SSID IE. */ @@ -1358,7 +1358,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, * which shouldn't be in TFD mask anyway */ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type); + 0, vif->type, + IWL_STA_MULTICAST); if (ret) goto out_release; } @@ -1477,7 +1478,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * already marked as draining, so to complete the draining, we * just need to wait until the transport is empty. */ - iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk); } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { @@ -2111,15 +2112,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_remove; - /* Send the bcast station. At this stage the TBTT and DTIM time events - * are added and applied to the scheduler */ - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; - ret = iwl_mvm_add_mcast_sta(mvm, vif); + /* Send the bcast station. At this stage the TBTT and DTIM time events + * are added and applied to the scheduler */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) - goto out_rm_bcast; + goto out_rm_mcast; /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2148,9 +2149,9 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; - iwl_mvm_rm_mcast_sta(mvm, vif); -out_rm_bcast: iwl_mvm_send_rm_bcast_sta(mvm, vif); +out_rm_mcast: + iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: @@ -2196,8 +2197,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); - iwl_mvm_rm_mcast_sta(mvm, vif); + + /* + * This is not very nice, but the simplest: + * For older FWs removing the mcast sta before the bcast station may + * cause assert 0x2b00. + * This is fixed in later FW (which will stop beaconing when removing + * bcast station). 
+ * So make the order of removal depend on the TLV + */ + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); @@ -2363,7 +2376,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) continue; - if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE) + if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; __set_bit(tid_data->txq_id, &txqs); @@ -3988,7 +4001,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ - iwl_trans_wait_tx_queue_empty(mvm->trans, msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1938dfb44152..4e74a6b90e70 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -380,6 +380,8 @@ struct iwl_mvm_vif { bool associated; u8 ap_assoc_sta_count; + u16 cab_queue; + bool uploaded; bool ap_ibss_active; bool pm_enabled; @@ -715,6 +717,8 @@ enum iwl_mvm_queue_status { }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) +#define IWL_MVM_INVALID_QUEUE 0xFFFF + #define IWL_MVM_NUM_CIPHERS 10 #ifdef CONFIG_ACPI @@ -784,9 +788,9 @@ struct iwl_mvm { u64 on_time_scan; } radio_stats, accu_radio_stats; + u8 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; + struct { - /* Map to HW queue */ - u32 hw_queue_to_mac80211; u8 hw_queue_refcount; u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ @@ -1312,7 +1316,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); -int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, @@ -1828,6 +1831,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, u32 size); void iwl_mvm_reorder_timer_expired(unsigned long data); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); +bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index eade099b6dbf..283c41df622c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -817,6 +817,11 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) { + IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n"); + return; + } + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 888053323c92..9ffff6ed8133 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -746,10 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); - if (iwl_mvm_has_new_tx_api(mvm)) - err = iwl_run_unified_mvm_ucode(mvm, true); - else - err = iwl_run_init_mvm_ucode(mvm, true); + err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -1047,7 +1044,7 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) unsigned long mq; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[hw_queue]; spin_unlock_bh(&mvm->queue_info_lock); iwl_mvm_stop_mac_queues(mvm, mq); @@ -1077,7 +1074,7 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) unsigned long mq; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[hw_queue]; spin_unlock_bh(&mvm->queue_info_lock); iwl_mvm_start_mac_queues(mvm, mq); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index ce907c58ebf6..7788eefcd2bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -826,7 +826,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, if (is_siso(rate) && rate->stbc) { /* To enable STBC we need to set both a flag and ANT_AB */ ucode_rate |= RATE_MCS_ANT_AB_MSK; - ucode_rate |= RATE_MCS_VHT_STBC_MSK; + ucode_rate |= RATE_MCS_STBC_MSK; } ucode_rate |= rate->bw; @@ -873,7 +873,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) rate->ldpc = true; - if (ucode_rate & RATE_MCS_VHT_STBC_MSK) + if (ucode_rate & RATE_MCS_STBC_MSK) rate->stbc = true; if (ucode_rate & RATE_MCS_BF_MSK) rate->bfer = true; @@ -3641,13 +3641,12 @@ int rs_pretty_print_rate(char *buf, const u32 rate) bw = "BAD BW"; } - return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n", + return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n", type, rs_pretty_ant(ant), bw, mcs, nss, (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", - (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "", + (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", - (rate & RATE_MCS_BF_MSK) ? "BF " : "", - (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : ""); + (rate & RATE_MCS_BF_MSK) ? 
"BF " : ""); } /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d4c0ca7ccb34..fd1dd06c4f18 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -410,7 +410,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* @@ -427,38 +427,38 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: - rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->flag |= RX_FLAG_HT_GF; + rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->flag |= RX_FLAG_LDPC; + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->vht_nss = + rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->flag |= RX_FLAG_VHT; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->encoding = RX_ENC_VHT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->vht_flag |= RX_VHT_FLAG_BF; + rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 24c4fbe139a3..966cd7543629 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -824,7 +824,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } /* set the preamble flag if appropriate */ if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); @@ -958,38 +958,38 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = 
RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: - rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->flag |= RX_FLAG_HT_GF; + rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->flag |= RX_FLAG_LDPC; + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->vht_nss = + rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->flag |= RX_FLAG_VHT; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->encoding = RX_ENC_VHT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->vht_flag |= RX_VHT_FLAG_BF; + rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 9668f945b4e6..8d1b994ae79f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -81,44 +81,30 @@ enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_HIGH, }; +#define IWL_SCAN_DWELL_ACTIVE 10 +#define IWL_SCAN_DWELL_PASSIVE 110 +#define IWL_SCAN_DWELL_FRAGMENTED 44 +#define IWL_SCAN_DWELL_EXTENDED 90 + struct iwl_mvm_scan_timing_params { - u32 dwell_active; - u32 dwell_passive; - u32 dwell_fragmented; - u32 dwell_extended; u32 suspend_time; u32 max_out_time; }; static struct iwl_mvm_scan_timing_params scan_timing[] = { [IWL_SCAN_TYPE_UNASSOC] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 0, .max_out_time = 0, }, [IWL_SCAN_TYPE_WILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 30, .max_out_time = 120, }, [IWL_SCAN_TYPE_MILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 120, .max_out_time = 120, }, [IWL_SCAN_TYPE_FRAGMENTED] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, .suspend_time = 95, .max_out_time = 44, }, @@ -294,34 +280,15 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) return max_ie_len; } -static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, - int num_res, u8 *buf, size_t buf_size) -{ - int i; - u8 *pos = buf, *end = buf + buf_size; - - for (i = 0; pos < end && i < num_res; i++) - pos += snprintf(pos, end - pos, " %u", res[i].channel); - - /* terminate the string in case the buffer was too short */ - *(buf + buf_size - 1) = '\0'; - - return buf; -} - void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct 
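The rx.c and rxmq.c hunks above apply the same mechanical conversion in two places: mac80211's packed rx_status->flag/vht_flag bits are replaced by the dedicated bw, encoding, nss and enc_flags fields. A condensed restatement of the new mapping for readability, assuming the iwlwifi rate macros used in and around those hunks (RATE_MCS_*, RATE_VHT_MCS_*) and mac80211's RX_ENC_*/RATE_INFO_BW_* definitions; a sketch, not a drop-in helper:

/* Sketch: how a firmware rate_n_flags word lands in the new
 * ieee80211_rx_status fields, mirroring the converted hunks above.
 */
#include <net/mac80211.h>	/* struct ieee80211_rx_status, RX_ENC_* */
/* RATE_MCS_* / RATE_VHT_MCS_* come from the iwlwifi rate API headers */

static void sketch_fill_rx_rate(struct ieee80211_rx_status *rx_status,
				u32 rate_n_flags)
{
	u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;

	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->bw = RATE_INFO_BW_40;	/* was RX_FLAG_40MHZ */
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->bw = RATE_INFO_BW_80;	/* was RX_VHT_FLAG_80MHZ */
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->bw = RATE_INFO_BW_160;	/* was RX_VHT_FLAG_160MHZ */
		break;
	}

	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate_n_flags & RATE_MCS_HT_MSK) {
		rx_status->encoding = RX_ENC_HT;	/* was RX_FLAG_HT */
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		rx_status->encoding = RX_ENC_VHT;	/* was RX_FLAG_VHT */
		rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
				  RATE_VHT_MCS_NSS_POS) + 1;	/* was vht_nss */
		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
	}
	/* GF, LDPC and BF convert the same way: RX_ENC_FLAG_HT_GF,
	 * RX_ENC_FLAG_LDPC, RX_ENC_FLAG_BF in enc_flags.
	 */
}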
iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; IWL_DEBUG_SCAN(mvm, - "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); + "Scan offload iteration complete: status=0x%x scanned channels=%d\n", + notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); @@ -743,10 +710,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->extended_dwell = scan_timing[params->type].dwell_extended; + cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); @@ -944,13 +911,12 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) } static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, - struct iwl_scan_dwell *dwell, - struct iwl_mvm_scan_timing_params *timing) + struct iwl_scan_dwell *dwell) { - dwell->active = timing->dwell_active; - dwell->passive = timing->dwell_passive; - dwell->fragmented = timing->dwell_fragmented; - dwell->extended = timing->dwell_extended; + dwell->active = IWL_SCAN_DWELL_ACTIVE; + dwell->passive = IWL_SCAN_DWELL_PASSIVE; + dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED; + dwell->extended = IWL_SCAN_DWELL_EXTENDED; } static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) @@ -979,7 +945,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); - iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); @@ -1010,7 +976,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, cpu_to_le32(scan_timing[type].max_out_time); } - iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); @@ -1114,11 +1080,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->passive_dwell = params->measurement_dwell; cmd->extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = timing->dwell_active; - cmd->passive_dwell = timing->dwell_passive; - cmd->extended_dwell = timing->dwell_extended; + cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = timing->dwell_fragmented; + cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); @@ -1612,16 +1578,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, { 
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; mvm->scan_start = le64_to_cpu(notif->start_tsf); IWL_DEBUG_SCAN(mvm, - "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", + notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index a2a1fa06b781..f5c786ddc526 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -77,9 +77,11 @@ */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { - return iwl_mvm_has_new_rx_api(mvm) ? - sizeof(struct iwl_mvm_add_sta_cmd) : - sizeof(struct iwl_mvm_add_sta_cmd_v7); + if (iwl_mvm_has_new_rx_api(mvm) || + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + return sizeof(struct iwl_mvm_add_sta_cmd); + else + return sizeof(struct iwl_mvm_add_sta_cmd_v7); } static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, @@ -126,6 +128,9 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 status; u32 agg_size = 0, mpdu_dens = 0; + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + add_sta_cmd.station_type = mvm_sta->sta_type; + if (!update || (flags & STA_MODIFY_QUEUES)) { memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); @@ -464,7 +469,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); - mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -495,6 +500,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, spin_unlock_bh(&mvm->queue_info_lock); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + if (WARN_ON(!mvmsta)) + return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ @@ -642,7 +649,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; - mq = mvm->queue_info[queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[queue]; shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); spin_unlock_bh(&mvm->queue_info_lock); @@ -651,7 +658,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, /* Stop MAC queues and wait for this queue to empty */ iwl_mvm_stop_mac_queues(mvm, mq); - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue)); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); @@ -730,10 +737,6 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, mvmsta->tfd_queue_msk |= BIT(queue); spin_unlock_bh(&mvmsta->lock); - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - 
spin_unlock_bh(&mvm->queue_info_lock); - return 0; } @@ -1084,7 +1087,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, ac = iwl_mvm_tid_to_ac_queue(tid); mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; - if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE && + if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { IWL_ERR(mvm, "Can't alloc TXQ for sta %d tid %d - dropping frame\n", @@ -1129,8 +1132,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) mutex_lock(&mvm->mutex); + /* No queue reconfiguration in TVQM mode */ + if (iwl_mvm_has_new_tx_api(mvm)) + goto alloc_queues; + /* Reconfigure queues requiring reconfiguation */ - for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) { + for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { bool reconfig; bool change_owner; @@ -1158,6 +1165,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) iwl_mvm_change_queue_owner(mvm, queue); } +alloc_queues: /* Go over all stations with deferred traffic */ for_each_set_bit(sta_id, mvm->sta_deferred_frames, IWL_MVM_STATION_COUNT) { @@ -1186,6 +1194,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, int queue; bool using_inactive_queue = false, same_sta = false; + /* queue reserving is disabled on new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return 0; + /* * Check for inactive queues, so we don't reach a situation where we * can't add a STA due to a shortage in queues that doesn't really exist @@ -1261,7 +1273,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, int ac; u8 mac_queue; - if (txq_id == IEEE80211_INVAL_HW_QUEUE) + if (txq_id == IWL_MVM_INVALID_QUEUE) continue; skb_queue_head_init(&tid_data->deferred_tx_frames); @@ -1292,9 +1304,8 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, wdg_timeout); + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } - - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); @@ -1336,6 +1347,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; + mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ atomic_set(&mvm->pending_frames[sta_id], 0); @@ -1369,7 +1381,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ - mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } mvm_sta->deferred_traffic_tid_map = 0; @@ -1385,7 +1397,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) @@ -1568,13 +1580,13 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { - if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE) + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = iwl_mvm_tid_to_ac_queue(i); iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, vif->hw_queue[ac], i, 0); - mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } } @@ -1602,8 +1614,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); if (ret) return ret; - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, - mvm_sta->tfd_queue_msk); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + mvm_sta->tfd_queue_msk); if (ret) return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); @@ -1719,7 +1731,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) + u32 qmask, enum nl80211_iftype iftype, + enum iwl_sta_type type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); @@ -1728,6 +1741,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, } sta->tfd_queue_msk = qmask; + sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); @@ -1756,6 +1770,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, cmd.sta_id = sta->sta_id; cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + cmd.station_type = sta->type; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); @@ -1820,7 +1836,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), - NL80211_IFTYPE_UNSPECIFIED); + NL80211_IFTYPE_UNSPECIFIED, + IWL_STA_AUX_ACTIVITY); if (ret) return ret; @@ -1893,7 +1910,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; - int queue = 0; + int queue; int ret; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); @@ -1938,10 +1955,11 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * to firmware so enable queue here - after the station was added */ if 
(iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], - bsta->sta_id, - IWL_MAX_TID_COUNT, - wdg_timeout); + queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], + bsta->sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + if (vif->type == NL80211_IFTYPE_AP) mvm->probe_queue = queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) @@ -2018,7 +2036,8 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, - ieee80211_vif_type_p2p(vif)); + ieee80211_vif_type_p2p(vif), + IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, @@ -2104,6 +2123,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) return -ENOTSUPP; + /* + * While in previous FWs we had to exclude cab queue from TFD queue + * mask, now it is needed as any other queue. + */ + if (!iwl_mvm_has_new_tx_api(mvm) && + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, + &cfg, timeout); + msta->tfd_queue_msk |= BIT(vif->cab_queue); + } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); if (ret) { @@ -2114,15 +2143,18 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* * Enable cab queue after the ADD_STA command is sent. * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG - * command with unknown station id. + * command with unknown station id, and for FW that doesn't support + * station API since the cab queue is not included in the + * tfd_queue_mask. */ if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, msta->sta_id, IWL_MAX_TID_COUNT, timeout); - vif->cab_queue = queue; - } else { + mvmvif->cab_queue = queue; + } else if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_STA_TYPE)) { iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, &cfg, timeout); } @@ -2144,7 +2176,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (!iwl_mvm_is_dqa_supported(mvm)) return 0; - iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, + iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); @@ -2485,10 +2517,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * one and mark it as reserved * 3. 
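The iwl_mvm_add_mcast_sta() changes above end up with three distinct ways of bringing up the multicast (CAB) queue depending on firmware generation. A purely illustrative helper (not part of the driver) that spells out the three-way decision those hunks encode:

/* Illustrative only: CAB-queue bring-up strategies implied by the
 * iwl_mvm_add_mcast_sta() hunks above, keyed off two capability checks.
 */
enum cab_queue_strategy {
	/* a000 / TVQM: the firmware allocates the queue, so enable it only
	 * after ADD_STA (queue config needs a known station id) and remember
	 * the returned queue in mvmvif->cab_queue. */
	CAB_QUEUE_TVQM_AFTER_ADD_STA,

	/* older TX path but firmware understands station types: enable the
	 * CAB queue before ADD_STA and include it in the TFD queue mask. */
	CAB_QUEUE_BEFORE_ADD_STA_IN_TFD_MASK,

	/* neither capability: legacy path, enable the queue after ADD_STA
	 * and keep it out of the TFD queue mask. */
	CAB_QUEUE_LEGACY_AFTER_ADD_STA,
};

static enum cab_queue_strategy pick_cab_queue_strategy(bool has_new_tx_api,
							bool has_sta_type_api)
{
	if (has_new_tx_api)
		return CAB_QUEUE_TVQM_AFTER_ADD_STA;
	if (has_sta_type_api)
		return CAB_QUEUE_BEFORE_ADD_STA_IN_TFD_MASK;
	return CAB_QUEUE_LEGACY_AFTER_ADD_STA;
}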
In DQA mode, but no traffic yet on this TID: same treatment as in * non-DQA mode, since the TXQ hasn't yet been allocated + * Don't support case 3 for new TX path as it is not expected to happen + * and aggregation will be offloaded soon anyway */ txq_id = mvmsta->tid_data[tid].txq_id; - if (iwl_mvm_is_dqa_supported(mvm) && - unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { + if (iwl_mvm_has_new_tx_api(mvm)) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { + ret = -ENXIO; + goto release_locks; + } + } else if (iwl_mvm_is_dqa_supported(mvm) && + unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", @@ -2584,6 +2624,20 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); + if (iwl_mvm_has_new_tx_api(mvm)) { + /* + * If no queue iwl_mvm_sta_tx_agg_start() would have failed so + * no need to check queue's status + */ + if (buf_size < mvmsta->max_agg_bufsize) + return -ENOTSUPP; + + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); + if (ret) + return -EIO; + goto out; + } + cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; spin_lock_bh(&mvm->queue_info_lock); @@ -2602,18 +2656,11 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { /* - * On new TX API rs and BA manager are offloaded. - * For now though, just don't support being reconfigured - */ - if (iwl_mvm_has_new_tx_api(mvm)) - return -ENOTSUPP; - - /* * If reconfiguring an existing queue, it first must be * drained */ - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, - BIT(queue)); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); @@ -2648,6 +2695,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); +out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, @@ -2665,6 +2713,27 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); } +static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + u16 txq_id) +{ + if (iwl_mvm_has_new_tx_api(mvm)) + return; + + spin_lock_bh(&mvm->queue_info_lock); + /* + * The TXQ is marked as reserved only if no traffic came through yet + * This means no traffic has been sent on this TID (agg'd or not), so + * we no longer have use for the queue. Since it hasn't even been + * allocated through iwl_mvm_enable_txq, so we can just mark it back as + * free. + */ + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + + spin_unlock_bh(&mvm->queue_info_lock); +} + int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { @@ -2691,18 +2760,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - spin_lock_bh(&mvm->queue_info_lock); - /* - * The TXQ is marked as reserved only if no traffic came through yet - * This means no traffic has been sent on this TID (agg'd or not), so - * we no longer have use for the queue. 
Since it hasn't even been - * allocated through iwl_mvm_enable_txq, so we can just mark it back as - * free. - */ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; - - spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); switch (tid_data->state) { case IWL_AGG_ON: @@ -2782,24 +2840,14 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - spin_lock_bh(&mvm->queue_info_lock); - /* - * The TXQ is marked as reserved only if no traffic came through yet - * This means no traffic has been sent on this TID (agg'd or not), so - * we no longer have use for the queue. Since it hasn't even been - * allocated through iwl_mvm_enable_txq, so we can just mark it back as - * free. - */ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; - spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); - iwl_trans_wait_tx_queue_empty(mvm->trans, - mvmsta->tfd_queue_msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, + mvmsta->tfd_queue_msk); iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); @@ -3429,13 +3477,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA); + cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL); + cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); + cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ @@ -3512,6 +3560,27 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, spin_unlock_bh(&mvm_sta->lock); } +static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_int_sta *sta, + bool disable) +{ + u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = sta->sta_id, + .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, + .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), + .mac_id_n_color = cpu_to_le32(id), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) @@ -3536,6 +3605,22 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + return; + + /* Need to block/unblock also multicast station */ + if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) + iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, + &mvmvif->mcast_sta, disable); + + /* + * Only unblock the broadcast station (FW blocks it for immediate + * quiet, not the driver) + */ + if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) + iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, + &mvmvif->bcast_sta, disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index a143a8757e27..2716cb5483bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -380,6 +380,7 @@ struct iwl_mvm_rxq_dup_data { * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. * @max_agg_bufsize: the maximal size of the AGG buffer for this station + * @sta_type: station type * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP @@ -416,6 +417,7 @@ struct iwl_mvm_sta { u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; + enum iwl_sta_type sta_type; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; @@ -453,10 +455,12 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or * broadcast) * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @type: station type * @tfd_queue_msk: the tfd queues used by the station */ struct iwl_mvm_int_sta { u32 sta_id; + enum iwl_sta_type type; u32 tfd_queue_msk; }; @@ -536,7 +540,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype); + u32 qmask, enum nl80211_iftype iftype, + enum iwl_sta_type type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8f737f6cdd80..bcaceb64a6e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -630,6 +630,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (queue < 0) return -1; + if (queue == info.control.vif->cab_queue) + queue = mvmvif->cab_queue; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && is_multicast_ether_addr(hdr->addr1)) { u8 
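The iwl_mvm_modify_all_sta_disable_tx() additions above treat the two internal stations asymmetrically: the multicast station is both blocked and unblocked by the driver, while the broadcast station is only ever unblocked, since the firmware blocks it itself for the immediate-quiet period. A tiny illustrative helper (names hypothetical) that captures just that decision:

/* Illustrative only: which internal stations get the DISABLE_TX modify,
 * per the iwl_mvm_modify_all_sta_disable_tx() hunk above.
 */
static void sketch_modify_internal_stas(bool disable,
					bool has_sta_type_api,
					void (*send_modify)(const char *sta,
							    bool disable))
{
	if (!has_sta_type_api)
		return;			/* older FW: internal stas untouched */

	/* multicast station: both block (disable) and unblock */
	send_modify("mcast", disable);

	/* broadcast station: unblock only; the FW already blocked it */
	if (!disable)
		send_modify("bcast", disable);
}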
ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); @@ -918,7 +920,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; - u8 txq_id = info->hw_queue; + u16 txq_id = info->hw_queue; bool is_ampdu = false; int hdrlen; @@ -988,11 +990,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || + if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || !mvmsta->tid_data[tid].is_tid_active) && iwl_mvm_is_dqa_supported(mvm)) { /* If TXQ needs to be allocated... */ - if (txq_id == IEEE80211_INVAL_HW_QUEUE) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); /* @@ -1004,6 +1006,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, return 0; } + /* queue should always be active in new TX path */ + WARN_ON(iwl_mvm_has_new_tx_api(mvm)); + /* If we are here - TXQ exists and needs to be re-activated */ spin_lock(&mvm->queue_info_lock); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; @@ -1014,7 +1019,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 1dde05697c29..8f4f176e204e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -592,15 +592,16 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) lockdep_assert_held(&mvm->queue_info_lock); + /* This should not be hit with new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -ENOSPC; + /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].hw_queue_refcount == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; - if (iwl_mvm_has_new_tx_api(mvm)) - return -ENOSPC; - /* * If no free queue found - settle for an inactive one to reconfigure * Make sure that the inactive queue either already belongs to this STA, @@ -670,7 +671,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, if (mvm->queue_info[queue].hw_queue_refcount > 0) enable_queue = false; - mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + mvm->queue_info[queue].hw_queue_refcount++; mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; @@ -688,7 +690,7 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); + mvm->hw_queue_to_mac80211[queue]); spin_unlock_bh(&mvm->queue_info_lock); @@ -720,7 +722,10 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); - iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid); + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d (mac80211 
map:0x%x)\n", + queue, mvm->hw_queue_to_mac80211[queue]); return queue; } @@ -764,6 +769,17 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool remove_mac_queue = true; + int ret; + + if (iwl_mvm_has_new_tx_api(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_free(mvm->trans, queue); + + return 0; + } spin_lock_bh(&mvm->queue_info_lock); @@ -791,7 +807,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } if (remove_mac_queue) - mvm->queue_info[queue].hw_queue_to_mac80211 &= + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); mvm->queue_info[queue].hw_queue_refcount--; @@ -804,7 +820,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); + mvm->hw_queue_to_mac80211[queue]); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) { @@ -818,39 +834,30 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].hw_queue_refcount || mvm->queue_info[queue].tid_bitmap || - mvm->queue_info[queue].hw_queue_to_mac80211, + mvm->hw_queue_to_mac80211[queue], "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211, + mvm->hw_queue_to_mac80211[queue], mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].hw_queue_refcount = 0; mvm->queue_info[queue].tid_bitmap = 0; - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; + mvm->hw_queue_to_mac80211[queue] = 0; /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; spin_unlock_bh(&mvm->queue_info_lock); - if (iwl_mvm_has_new_tx_api(mvm)) { - iwl_trans_txq_free(mvm->trans, queue); - } else { - int ret; - - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(struct iwl_scd_txq_cfg_cmd), - &cmd); - - if (ret) - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", - queue, ret); - return ret; - } + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); - return 0; + if (ret) + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", + queue, ret); + return ret; } /** @@ -1062,6 +1069,35 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) return bss_iter_data.vif; } +struct iwl_sta_iter_data { + bool assoc; +}; + +static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_sta_iter_data *data = _data; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (vif->bss_conf.assoc) + data->assoc = true; +} + +bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) +{ + struct iwl_sta_iter_data data = { + .assoc = false, + }; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_sta_iface_iterator, + &data); + return data.assoc; +} + unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool 
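Across the hunks above, the per-queue hw_queue_to_mac80211 field becomes a top-level mvm->hw_queue_to_mac80211[] array sized IWL_MAX_TVQM_QUEUES: one bitmap of mac80211 queues per hardware queue, updated on enable/disable and walked when stopping or waking queues. A small self-contained sketch of that bookkeeping pattern in plain C (generic names and sizes, a stand-in for the kernel bitops rather than the driver's code):

/* Sketch: one "mac80211 queue" bitmap per hardware queue. */
#include <stdio.h>

#define NUM_HW_QUEUES 512	/* illustrative; the driver uses IWL_MAX_TVQM_QUEUES */

static unsigned long hw_to_mac80211[NUM_HW_QUEUES];

static void map_queue(int hw_queue, int mac80211_queue)
{
	hw_to_mac80211[hw_queue] |= 1UL << mac80211_queue;
}

static void unmap_queue(int hw_queue, int mac80211_queue)
{
	hw_to_mac80211[hw_queue] &= ~(1UL << mac80211_queue);
}

/* e.g. wake every mac80211 queue backed by a given hardware queue */
static void for_each_mapped_queue(int hw_queue, void (*fn)(int mac80211_queue))
{
	unsigned long map = hw_to_mac80211[hw_queue];
	int q;

	for (q = 0; map; q++, map >>= 1)
		if (map & 1UL)
			fn(q);
}

static void wake_one(int q) { printf("wake mac80211 queue %d\n", q); }

int main(void)
{
	map_queue(10, 2);
	map_queue(10, 3);
	unmap_queue(10, 2);
	for_each_mapped_queue(10, wake_one);	/* prints queue 3 only */
	return 0;
}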
tdls, bool cmd_q) @@ -1173,8 +1209,8 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; - mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; - mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue); + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); mvm->queue_info[queue].hw_queue_refcount--; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); mvmsta->tid_data[tid].is_tid_active = false; @@ -1194,7 +1230,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->queue_info[queue].hw_queue_to_mac80211 |= + mvm->hw_queue_to_mac80211[queue] |= BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 1d95512361b2..b1f43397bb59 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -251,6 +251,10 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, iwl_enable_interrupts(trans); + /* Configure debug, if exists */ + if (trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index b9e9e10c32fa..fd4faaaa1484 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -414,9 +414,9 @@ struct iwl_trans_pcie { struct iwl_dma_ptr kw; struct iwl_txq *txq_memory; - struct iwl_txq *txq[IWL_MAX_HW_QUEUES]; - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; /* PCI bus related data */ struct pci_dev *pci_dev; @@ -778,6 +778,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); +void iwl_pcie_apply_destination(struct iwl_trans *trans); /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f98f2d2b8a1b..1da2de205cdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1147,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. 
*/ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); - if (reclaim) { + if (reclaim && !pkt->hdr.group_id) { int i; for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 91f6030529b3..70acf850a9f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -831,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, return 0; } -static void iwl_pcie_apply_destination(struct iwl_trans *trans) +void iwl_pcie_apply_destination(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; @@ -2833,7 +2833,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .ref = iwl_trans_pcie_ref, \ .unref = iwl_trans_pcie_unref, \ .dump_data = iwl_trans_pcie_dump_data, \ - .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, \ + .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume @@ -2976,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - trans->dev = &pdev->dev; trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 28cf97489001..d9128bb25e85 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -2283,7 +2283,7 @@ int orinoco_if_add(struct orinoco_private *priv, priv->ndev = dev; /* Report what we've done */ - dev_dbg(priv->dev, "Registerred interface %s.\n", dev->name); + dev_dbg(priv->dev, "Registered interface %s.\n", dev->name); return 0; diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 98e1380b9917..132f5fbda58b 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -769,18 +769,31 @@ static int ezusb_submit_in_urb(struct ezusb_priv *upriv) static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset) { - u8 res_val = reset; /* avoid argument promotion */ + int ret; + u8 *res_val = NULL; if (!upriv->udev) { err("%s: !upriv->udev", __func__); return -EFAULT; } - return usb_control_msg(upriv->udev, + + res_val = kmalloc(sizeof(*res_val), GFP_KERNEL); + + if (!res_val) + return -ENOMEM; + + *res_val = reset; /* avoid argument promotion */ + + ret = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_FW_TRANS, USB_TYPE_VENDOR | USB_RECIP_DEVICE | - USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val, - sizeof(res_val), DEF_TIMEOUT); + USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val, + sizeof(*res_val), DEF_TIMEOUT); + + kfree(res_val); + + return ret; } static int ezusb_firmware_download(struct ezusb_priv *upriv, diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 1af7da0b386e..5e1c91a80c58 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -352,7 +352,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) - rx_status->flag |= RX_FLAG_SHORTPRE; + 
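The ezusb_8051_cpucs() rework above moves the one-byte transfer value off the stack: buffers handed to usb_control_msg() may be DMA-mapped by the USB core, so they must come from the heap (kmalloc), never from the caller's stack. The minimal pattern, sketched with a generic vendor request; the request/value constants and timeout below are placeholders, not the orinoco ones:

/* Sketch: heap-allocate even a single-byte buffer for a control transfer. */
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int send_one_byte_vendor_req(struct usb_device *udev, u8 request,
				    u16 value, u8 byte)
{
	u8 *buf;
	int ret;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-able, not stack */
	if (!buf)
		return -ENOMEM;
	*buf = byte;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			      value, 0, buf, sizeof(*buf), 1000 /* ms */);

	kfree(buf);
	return ret;
}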
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 0 : rate - 4; else diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0cab122669c8..87444af20fc5 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1192,18 +1192,18 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { rx_status.rate_idx = ieee80211_rate_get_vht_mcs(&info->control.rates[0]); - rx_status.vht_nss = + rx_status.nss = ieee80211_rate_get_vht_nss(&info->control.rates[0]); - rx_status.flag |= RX_FLAG_VHT; + rx_status.encoding = RX_ENC_VHT; } else { rx_status.rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate real signal strength (and optional packet loss) */ rx_status.signal = -50; if (info->control.vif) diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index c3a53cd6988e..7b4955cc38db 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1181,6 +1181,10 @@ static int if_spi_probe(struct spi_device *spi) /* Initialize interrupt handling stuff. */ card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0); + if (!card->workqueue) { + err = -ENOMEM; + goto remove_card; + } INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); INIT_WORK(&card->resume_work, if_spi_resume_worker); @@ -1209,6 +1213,7 @@ release_irq: free_irq(spi->irq, card); terminate_workqueue: destroy_workqueue(card->workqueue); +remove_card: lbs_remove_card(priv); /* will call free_netdev */ free_card: free_if_spi_card(card); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 49b4c805b7d5..7ec06bf13413 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2053,7 +2053,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2321,7 +2321,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); @@ -2530,7 +2530,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, priv->scan_block = false; if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); if (!user_scan_cfg) @@ -2720,7 +2720,7 @@ 
mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, * previous bgscan configuration in the firmware */ static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy, - struct net_device *dev) + struct net_device *dev, u64 reqid) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); @@ -4297,7 +4297,6 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -4316,6 +4315,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + wiphy->max_sched_scan_reqs = 1; wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 976011d532d5..dd87b9ff64c3 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -748,7 +748,7 @@ mwifiex_close(struct net_device *dev) mwifiex_dbg(priv->adapter, INFO, "aborting bgscan on ndo_stop\n"); mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); } return 0; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index ab75da3e0c2b..f1d1f56fc23f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1201,7 +1201,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); - cfg80211_sched_scan_results(priv->wdev.wiphy); + cfg80211_sched_scan_results(priv->wdev.wiphy, 0); mwifiex_dbg(adapter, CMD, "info: CMD_RESP: BG_SCAN result is ready!\n"); break; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index b5b7664507eb..839df8a9634e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -793,7 +793,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_BG_SCAN_STOPPED: dev_dbg(adapter->dev, "event: BGS_STOPPED\n"); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); if (priv->sched_scanning) priv->sched_scanning = false; break; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 1532ac9cee0b..42997e05d90f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -560,7 +560,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) #endif mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); #ifdef CONFIG_PM } #endif diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index c295a4c6e5cd..e813b2ca740c 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -994,9 +994,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status 
*status, *noise = -rxd->noise_floor; if (rxd->rate & MWL8K_AP_RATE_INFO_MCS_FORMAT) { - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate); } else { int i; @@ -1011,7 +1011,7 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status, if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->flag & RX_FLAG_HT)) + if (!(status->encoding == RX_ENC_HT)) status->rate_idx -= 5; } else { status->band = NL80211_BAND_2GHZ; @@ -1109,17 +1109,17 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE) - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (rate_info & MWL8K_STA_RATE_INFO_40MHZ) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT) - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->flag & RX_FLAG_HT)) + if (!(status->encoding == RX_ENC_HT)) status->rate_idx -= 5; } else { status->band = NL80211_BAND_2GHZ; diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index 3c576392ed89..d6dc59bb00df 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -401,7 +401,7 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) case MT_PHY_TYPE_CCK: if (idx >= 8) { idx -= 8; - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } if (WARN_ON(idx >= 4)) @@ -410,10 +410,10 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) status->rate_idx = idx; return; case MT_PHY_TYPE_HT_GF: - status->flag |= RX_FLAG_HT_GF; + status->enc_flags |= RX_ENC_FLAG_HT_GF; /* fall through */ case MT_PHY_TYPE_HT: - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; status->rate_idx = idx; break; default: @@ -422,13 +422,13 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) } if (rate & MT_RXWI_RATE_SGI) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate & MT_RXWI_RATE_STBC) - status->flag |= 1 << RX_FLAG_STBC_SHIFT; + status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; if (rate & MT_RXWI_RATE_BW) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index dbdfb3f5c507..a9f5f398b2f8 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - skb_reserve(skb, MT_DMA_HDR_LEN); - memcpy(skb_put(skb, len), data, len); + if (skb) { + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + } return skb; } @@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return 
mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); } @@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 8585cdc3de53..d11c7b210e81 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -889,10 +889,10 @@ void rt2800_process_rxwi(struct queue_entry *entry, rt2x00_desc_read(rxwi, 1, &word); if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI)) - rxdesc->flags |= RX_FLAG_SHORT_GI; + rxdesc->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rt2x00_get_field32(word, RXWI_W1_BW)) - rxdesc->flags |= RX_FLAG_40MHZ; + rxdesc->bw = RATE_INFO_BW_40; /* * Detect RX rate, always use MCS as signal type. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 5f9fa97b6088..357c0941aaad 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -825,7 +825,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); if (rxdesc.rate_mode == RATE_MODE_HT_MIX || rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD) - rxdesc.flags |= RX_FLAG_HT; + rxdesc.encoding = RX_ENC_HT; /* * Check if this is a beacon, and more frames have been @@ -865,6 +865,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rx_status->rate_idx = rate_idx; rx_status->signal = rxdesc.rssi; rx_status->flag = rxdesc.flags; + rx_status->enc_flags = rxdesc.enc_flags; + rx_status->encoding = rxdesc.encoding; + rx_status->bw = rxdesc.bw; rx_status->antenna = rt2x00dev->link.ant.active.rx; ieee80211_rx_ni(rt2x00dev->hw, entry->skb); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h index c78fb8c8838a..6055f36211b9 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h @@ -184,6 +184,9 @@ struct rxdone_entry_desc { int flags; int dev_flags; u16 rate_mode; + u16 enc_flags; + enum mac80211_rx_encoding encoding; + enum rate_info_bw bw; u8 cipher; u8 cipher_status; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index e387dec82d3d..225c1c8851cc 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -315,7 +315,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) rx_status.mactime = tsft; rx_status.flag |= RX_FLAG_MACTIME_START; if (flags & RTL818X_RX_DESC_FLAG_SPLCP) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 7dd18896d35a..35fe991dcc56 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -389,7 +389,7 @@ static void rtl8187_rx_cb(struct urb *urb) rx_status.band = dev->conf.chandef.chan->band; rx_status.flag |= RX_FLAG_MACTIME_START; if (flags & RTL818X_RX_DESC_FLAG_SPLCP) - rx_status.flag |= RX_FLAG_SHORTPRE; + 
rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 9b4a9a00be64..39d56313bc94 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5041,7 +5041,7 @@ static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv, u32 rxmcs) { if (phy_stats->sgi_en) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rxmcs < DESC_RATE_6M) { /* @@ -5267,10 +5267,10 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (rx_desc->rxht) { - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; } else { rx_status->rate_idx = rx_desc->rxmcs; @@ -5337,10 +5337,10 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (rx_desc->rxmcs >= DESC_RATE_MCS0) { - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; } else { rx_status->rate_idx = rx_desc->rxmcs; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 09c908d4cf91..dd3e12b74447 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -444,10 +444,10 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 3616ba21959d..94a4b39437cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -369,10 +369,10 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (stats->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 1611e42479d9..41422e4da8b7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -329,9 +329,9 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, if (!GET_RX_DESC_SWDEC(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(pdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RX_HT(pdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if 
(stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; @@ -398,9 +398,9 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) if (!GET_RX_DESC_SWDEC(rxdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(rxdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RX_HT(rxdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; /* Data rate */ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht, false, stats.rate); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 5c9c8741134f..86019f654428 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -503,9 +503,9 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, if (!GET_RX_DESC_SWDEC(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(pdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RXHT(pdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 07440e9a8ca2..b1864bb07c2c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -394,10 +394,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 12cef01e593b..a01dbd31d1b4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -289,10 +289,10 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (stats->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index c9838f52a7ea..f713c7249fed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -317,10 +317,10 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 6f65003a895a..3c6ce994c6aa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -373,10 +373,10 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= 
RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 94a5e587a1cd..aa3ccc740521 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) return rtl8821ae_phy_rf6052_config(hw); } +static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + switch (rtlhal->rfe_type) { + case 3: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); + break; + case 4: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001); + break; + case 5: + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + case 1: + if (rtlpriv->btcoexist.bt_coexistence) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + } + case 0: + case 2: + default: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + } +} + +static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + switch (rtlhal->rfe_type) { + case 0: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + case 1: + if (rtlpriv->btcoexist.bt_coexistence) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + } else { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + } + break; + case 3: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); + break; + case 5: + rtl_write_byte(rtlpriv, 
RA_RFE_PINMUX + 2, 0x33); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + case 2: + case 4: + default: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + } +} + u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, u8 rf_path) { @@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) /* 0x82C[1:0] = 2b'00 */ rtl_set_bbreg(hw, 0x82c, 0x3, 0); } - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, - 0x77777777); - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, - 0x77777777); - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); - } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_phy_set_rfe_reg_24g(hw); rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1); rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1); @@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) /* 0x82C[1:0] = 2'b00 */ rtl_set_bbreg(hw, 0x82c, 0x3, 1); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, - 0x77337777); - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, - 0x77337777); - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); - } + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_phy_set_rfe_reg_5g(hw); rtl_set_bbreg(hw, RTXPATH, 0xf0, 0); rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h index 1d6110f9c1fb..ed69dbe178ff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h @@ -2424,6 +2424,7 @@ #define BMASKH4BITS 0xf0000000 #define BMASKOFDM_D 0xffc00000 #define BMASKCCK 0x3f3f3f3f +#define BMASKRFEINV 0x3ff00000 #define BRFREGOFFSETMASK 0xfffff diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 108098152cf3..03665e82065f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -520,18 +520,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80) - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = RATE_INFO_BW_80; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; if (status->is_vht) - rx_status->flag |= RX_FLAG_VHT; + rx_status->encoding = RX_ENC_VHT; if (status->is_short_gi) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - rx_status->vht_nss = status->vht_nss; + rx_status->nss = status->vht_nss; rx_status->flag |= RX_FLAG_MACTIME_START; /* hw will set status->decrypted true, if it finds the diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index eb513628d801..9935bd09db1f 100644 --- a/drivers/net/wireless/rndis_wlan.c 
+++ b/drivers/net/wireless/rndis_wlan.c @@ -2830,15 +2830,22 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) } if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { - if (!roamed) + if (!roamed) { cfg80211_connect_result(usbdev->net, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, 0, GFP_KERNEL); - else - cfg80211_roamed(usbdev->net, - get_current_channel(usbdev, NULL), - bssid, req_ie, req_ie_len, - resp_ie, resp_ie_len, GFP_KERNEL); + } else { + struct cfg80211_roam_info roam_info = { + .channel = get_current_channel(usbdev, NULL), + .bssid = bssid, + .req_ie = req_ie, + .req_ie_len = req_ie_len, + .resp_ie = resp_ie, + .resp_ie_len = resp_ie_len, + }; + + cfg80211_roamed(usbdev->net, &roam_info, GFP_KERNEL); + } } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, get_current_channel(usbdev, NULL), @@ -3428,6 +3435,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + if (!priv->workqueue) { + wiphy_free(wiphy); + return -ENOMEM; + } INIT_WORK(&priv->work, rndis_wlan_worker); INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index 3d170287cd0b..cd63ffef025a 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -1085,7 +1085,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, hdr->band); if (arg->rx_rate >= 14) { - hdr->flag |= RX_FLAG_HT; + hdr->encoding = RX_ENC_HT; hdr->rate_idx = arg->rx_rate - 14; } else if (arg->rx_rate >= 4) { hdr->rate_idx = arg->rx_rate - 2; diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index a27d4c22b6e8..50fb2a4a5259 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c @@ -141,7 +141,7 @@ static void wl1251_rx_status(struct wl1251 *wl, } if (desc->mod_pre & SHORT_PREAMBLE_BIT) - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } static void wl1251_rx_body(struct wl1251 *wl, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index a21fda910529..382ec15ec1af 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6128,6 +6128,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); + wl->hw->wiphy->max_sched_scan_reqs = 1; wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); @@ -6135,7 +6136,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | - WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_CHANNEL_SWITCH; wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index b9e14045195f..52a55f9acd80 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -72,7 +72,7 @@ static void wl1271_rx_status(struct wl1271 *wl, /* 11n support */ if (desc->rate <= wl->hw_min_ht_rate) - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; /* * Read the signal level and antenna diversity indication. 
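The wlcore hunk above, like the mwifiex one earlier, shows the scheduled-scan rework in this series: support is now advertised through the numeric wiphy->max_sched_scan_reqs field instead of the removed WIPHY_FLAG_SUPPORTS_SCHED_SCAN bit, and the cfg80211 notifiers carry a 64-bit request id. A minimal sketch of that driver-side pattern follows; the mydrv names are invented, and, like the single-request drivers converted here, it simply passes request id 0.

#include <net/cfg80211.h>

struct mydrv_priv {
	struct wiphy *wiphy;
};

static void mydrv_setup_wiphy(struct wiphy *wiphy)
{
	/* replaces the removed WIPHY_FLAG_SUPPORTS_SCHED_SCAN capability bit */
	wiphy->max_sched_scan_reqs = 1;
}

/* firmware reported fresh background-scan results */
static void mydrv_bgscan_results_event(struct mydrv_priv *priv)
{
	/* drivers that support only one scheduled scan pass request id 0 */
	cfg80211_sched_scan_results(priv->wiphy, 0);
}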
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index cbb3388a9756..178f6f5d4613 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -666,8 +666,11 @@ void prism2_disconnected(struct wlandevice *wlandev) void prism2_roamed(struct wlandevice *wlandev) { - cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid, - NULL, 0, NULL, 0, GFP_KERNEL); + struct cfg80211_roam_info roam_info = { + .bssid = wlandev->bssid, + }; + + cfg80211_roamed(wlandev->netdev, &roam_info, GFP_KERNEL); } /* Structures for declaring wiphy interface */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 294fa6273a62..69033353d0d1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -7,7 +7,7 @@ * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (c) 2016 Intel Deutschland GmbH + * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2177,37 +2177,37 @@ enum ieee80211_tdls_actioncode { #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) /** - * enum - mesh synchronization method identifier + * enum ieee80211_mesh_sync_method - mesh synchronization method identifier * * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method * that will be specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_sync_method { IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, IEEE80211_SYNC_METHOD_VENDOR = 255, }; /** - * enum - mesh path selection protocol identifier + * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier * * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will * be specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_path_protocol { IEEE80211_PATH_PROTOCOL_HWMP = 1, IEEE80211_PATH_PROTOCOL_VENDOR = 255, }; /** - * enum - mesh path selection metric identifier + * enum ieee80211_mesh_path_metric - mesh path selection metric identifier * * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be * specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_path_metric { IEEE80211_PATH_METRIC_AIRTIME = 1, IEEE80211_PATH_METRIC_VENDOR = 255, }; @@ -2316,6 +2316,32 @@ struct ieee80211_timeout_interval_ie { __le32 value; } __packed; +/** + * enum ieee80211_idle_options - BSS idle options + * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN + * protected frame to the AP to reset the idle timer at the AP for + * the station. + */ +enum ieee80211_idle_options { + WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0), +}; + +/** + * struct ieee80211_bss_max_idle_period_ie + * + * This structure refers to "BSS Max idle period element" + * + * @max_idle_period: indicates the time period during which a station can + * refrain from transmitting frames to its associated AP without being + * disassociated. In units of 1000 TUs. + * @idle_options: indicates the options associated with the BSS idle capability + * as specified in &enum ieee80211_idle_options. 
+ */ +struct ieee80211_bss_max_idle_period_ie { + __le16 max_idle_period; + u8 idle_options; +} __packed; + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -2356,18 +2382,21 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) /* AKM suite selectors */ -#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) -#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) -#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) -#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) -#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) -#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) -#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) -#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) -#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) -#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) -#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) -#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) +#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) +#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) +#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3) +#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) +#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) +#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) +#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) +#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) +#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) +#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) +#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) +#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) +#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) +#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) #define WLAN_MAX_KEY_LEN 32 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c5847dc75a93..0c16866a7aac 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -48,6 +48,7 @@ struct br_ip_list { #define BR_MCAST_FLOOD BIT(11) #define BR_MULTICAST_TO_UNICAST BIT(12) #define BR_VLAN_TUNNEL BIT(13) +#define BR_BCAST_FLOOD BIT(14) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8c5c8cdc7b97..6847714a5ae3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3408,10 +3408,10 @@ static inline void netif_dormant_off(struct net_device *dev) } /** - * netif_dormant - test if carrier present + * netif_dormant - test if device is dormant * @dev: network device * - * Check if carrier is present on device + * Check if device is dormant. 
*/ static inline bool netif_dormant(const struct net_device *dev) { diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index ae93b65d13d7..45f89369c4c8 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -155,6 +155,7 @@ struct rhashtable_params { * @nelems: Number of elements in table * @key_len: Key length for hashfn * @p: Configuration parameters + * @max_elems: Maximum number of elements in table * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping @@ -165,6 +166,7 @@ struct rhashtable { atomic_t nelems; unsigned int key_len; struct rhashtable_params p; + unsigned int max_elems; bool rhlist; struct work_struct run_work; struct mutex mutex; @@ -327,8 +329,7 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht, static inline bool rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) { - return ht->p.max_size && - (atomic_read(&ht->nelems) / 2u) >= ht->p.max_size; + return atomic_read(&ht->nelems) >= ht->max_elems; } /* The bucket lock is selected based on the hash and protects mutations diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2a200b964b7a..6e90f1a4950f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1613,11 +1613,15 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) /** * struct cfg80211_match_set - sets of attributes to match * - * @ssid: SSID to be matched; may be zero-length for no match (RSSI only) + * @ssid: SSID to be matched; may be zero-length in case of BSSID match + * or no match (RSSI only) + * @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match + * or no match (RSSI only) * @rssi_thold: don't report scan results below this threshold (in s32 dBm) */ struct cfg80211_match_set { struct cfg80211_ssid ssid; + u8 bssid[ETH_ALEN]; s32 rssi_thold; }; @@ -1662,6 +1666,7 @@ struct cfg80211_bss_select_adjust { * (others are filtered out). * If ommited, all results are passed. * @n_match_sets: number of match sets + * @results_wk: worker for processing results notification. * @wiphy: the wiphy this was for * @dev: the interface * @scan_start: start time of the scheduled scan @@ -1678,6 +1683,8 @@ struct cfg80211_bss_select_adjust { * @rcu_head: RCU callback used to free the struct * @owner_nlportid: netlink portid of owner (if this should is a request * owned by a particular socket) + * @nl_owner_dead: netlink owner socket was closed - this request be freed + * @list: for keeping list of requests. * @delay: delay in seconds to use before starting the first scan * cycle. The driver may ignore this parameter and start * immediately (or at any other time), if this feature is not @@ -1720,8 +1727,11 @@ struct cfg80211_sched_scan_request { struct wiphy *wiphy; struct net_device *dev; unsigned long scan_start; + bool report_results; struct rcu_head rcu_head; u32 owner_nlportid; + bool nl_owner_dead; + struct list_head list; /* keep last */ struct ieee80211_channel *channels[0]; @@ -2678,8 +2688,7 @@ struct cfg80211_nan_func { * indication of requesting reassociation. * In both the driver-initiated and new connect() call initiated roaming * cases, the result of roaming is indicated with a call to - * cfg80211_roamed() or cfg80211_roamed_bss(). - * (invoked with the wireless_dev mutex held) + * cfg80211_roamed(). 
(invoked with the wireless_dev mutex held) * @update_connect_params: Update the connect parameters while connected to a * BSS. The updated parameters can be used by driver/firmware for * subsequent BSS selection (roaming) decisions and to form the @@ -2765,12 +2774,12 @@ struct cfg80211_nan_func { * @set_cqm_txe_config: Configure connection quality monitor TX error * thresholds. * @sched_scan_start: Tell the driver to start a scheduled scan. - * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan. This - * call must stop the scheduled scan and be ready for starting a new one - * before it returns, i.e. @sched_scan_start may be called immediately - * after that again and should not fail in that case. The driver should - * not call cfg80211_sched_scan_stopped() for a requested stop (when this - * method returns 0.) + * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with + * given request id. This call must stop the scheduled scan and be ready + * for starting a new one before it returns, i.e. @sched_scan_start may be + * called immediately after that again and should not fail in that case. + * The driver should not call cfg80211_sched_scan_stopped() for a requested + * stop (when this method returns 0). * * @mgmt_frame_register: Notify driver that a management frame type was * registered. The callback is allowed to sleep. @@ -3068,7 +3077,8 @@ struct cfg80211_ops { int (*sched_scan_start)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *request); - int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev); + int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev, + u64 reqid); int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_gtk_rekey_data *data); @@ -3213,7 +3223,7 @@ enum wiphy_flags { WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_MESH_AUTH = BIT(10), - WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11), + /* use hole at 11 */ /* use hole at 12 */ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), @@ -3551,6 +3561,8 @@ struct wiphy_iftype_ext_capab { * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in * any given scan + * @max_sched_scan_reqs: maximum number of scheduled scan requests that + * the device can run concurrently. 
* @max_sched_scan_ssids: maximum number of SSIDs the device can scan * for in any given scheduled scan * @max_match_sets: maximum number of match sets the device can handle @@ -3687,6 +3699,7 @@ struct wiphy { int bss_priv_size; u8 max_scan_ssids; + u8 max_sched_scan_reqs; u8 max_sched_scan_ssids; u8 max_match_sets; u16 max_scan_ie_len; @@ -3988,6 +4001,7 @@ struct cfg80211_cqm_config; * @event_list: (private) list for internal event processing * @event_lock: (private) lock for event list * @owner_nlportid: (private) owner socket port ID + * @nl_owner_dead: (private) owner socket went away * @cqm_config: (private) nl80211 RSSI monitor state */ struct wireless_dev { @@ -4037,12 +4051,13 @@ struct wireless_dev { u32 ap_unexpected_nlportid; + u32 owner_nlportid; + bool nl_owner_dead; + bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; - u32 owner_nlportid; - #ifdef CONFIG_CFG80211_WEXT /* wext data */ struct { @@ -4551,31 +4566,34 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, * cfg80211_sched_scan_results - notify that new scan results are available * * @wiphy: the wiphy which got scheduled scan results + * @reqid: identifier for the related scheduled scan request */ -void cfg80211_sched_scan_results(struct wiphy *wiphy); +void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped + * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. */ -void cfg80211_sched_scan_stopped(struct wiphy *wiphy); +void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped + * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. * This function should be called with rtnl locked. */ -void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid); /** * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame @@ -5376,51 +5394,46 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, } /** - * cfg80211_roamed - notify cfg80211 of roaming + * struct cfg80211_roam_info - driver initiated roaming information * - * @dev: network device * @channel: the channel of the new AP - * @bssid: the BSSID of the new AP + * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set) + * @bssid: the BSSID of the new AP (may be %NULL if %bss is set) * @req_ie: association request IEs (maybe be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @gfp: allocation flags - * - * It should be called by the underlying driver whenever it roamed - * from one AP to another while connected. 
*/ -void cfg80211_roamed(struct net_device *dev, - struct ieee80211_channel *channel, - const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); +struct cfg80211_roam_info { + struct ieee80211_channel *channel; + struct cfg80211_bss *bss; + const u8 *bssid; + const u8 *req_ie; + size_t req_ie_len; + const u8 *resp_ie; + size_t resp_ie_len; +}; /** - * cfg80211_roamed_bss - notify cfg80211 of roaming + * cfg80211_roamed - notify cfg80211 of roaming * * @dev: network device - * @bss: entry of bss to which STA got roamed - * @req_ie: association request IEs (maybe be %NULL) - * @req_ie_len: association request IEs length - * @resp_ie: association response IEs (may be %NULL) - * @resp_ie_len: assoc response IEs length + * @info: information about the new BSS. struct &cfg80211_roam_info. * @gfp: allocation flags * - * This is just a wrapper to notify cfg80211 of roaming event with driver - * passing bss to avoid a race in timeout of the bss entry. It should be - * called by the underlying driver whenever it roamed from one AP to another - * while connected. Drivers which have roaming implemented in firmware - * may use this function to avoid a race in bss entry timeout where the bss - * entry of the new AP is seen in the driver, but gets timed out by the time - * it is accessed in __cfg80211_roamed() due to delay in scheduling + * This function may be called with the driver passing either the BSSID of the + * new AP or passing the bss entry to avoid a race in timeout of the bss entry. + * It should be called by the underlying driver whenever it roamed from one AP + * to another while connected. Drivers which have roaming implemented in + * firmware should pass the bss entry to avoid a race in bss entry timeout where + * the bss entry of the new AP is seen in the driver, but gets timed out by the + * time it is accessed in __cfg80211_roamed() due to delay in scheduling * rdev->event_work. In case of any failures, the reference is released - * either in cfg80211_roamed_bss() or in __cfg80211_romed(), Otherwise, - * it will be released while diconneting from the current bss. + * either in cfg80211_roamed() or in __cfg80211_romed(), Otherwise, it will be + * released while diconneting from the current bss. */ -void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); +void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, + gfp_t gfp); /** * cfg80211_disconnected - notify cfg80211 that connection was dropped diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b1ac872dc88a..4d05a9443344 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc <[email protected]> * Copyright 2007-2010 Johannes Berg <[email protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -299,6 +299,8 @@ struct ieee80211_vif_chanctx_switch { * context had been assigned. * @BSS_CHANGED_OCB: OCB join status changed * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed + * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected + * keep alive) changed. 
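The new BSS_CHANGED_KEEP_ALIVE change flag ties together the BSS Max Idle Period element added to ieee80211.h earlier and the max_idle_period/protected_keep_alive fields added to ieee80211_bss_conf below. A hedged sketch of how a driver might consume it in its bss_info_changed() callback; the mydrv names and the firmware hook are invented for illustration, not part of any in-tree driver.

#include <net/mac80211.h>

static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *info,
				   u32 changed)
{
	if (!(changed & BSS_CHANGED_KEEP_ALIVE))
		return;

	/*
	 * max_idle_period is in units of 1000 TUs (1 TU = 1024 us), so one
	 * unit is 1024 ms; zero means the AP sent no valid element.
	 */
	mydrv_fw_set_keep_alive(hw->priv,		/* hypothetical hook */
				info->max_idle_period * 1024,
				info->protected_keep_alive);
}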
*/ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -325,6 +327,7 @@ enum ieee80211_bss_change { BSS_CHANGED_BANDWIDTH = 1<<21, BSS_CHANGED_OCB = 1<<22, BSS_CHANGED_MU_GROUPS = 1<<23, + BSS_CHANGED_KEEP_ALIVE = 1<<24, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -533,6 +536,13 @@ struct ieee80211_mu_group_data { * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed * to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS * if it has associated clients without P2P PS support. + * @max_idle_period: the time period during which the station can refrain from + * transmitting frames to its associated AP without being disassociated. + * In units of 1000 TUs. Zero value indicates that the AP did not include + * a (valid) BSS Max Idle Period Element. + * @protected_keep_alive: if set, indicates that the station should send an RSN + * protected frame to the AP to reset the idle timer at the AP for the + * station. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -573,6 +583,8 @@ struct ieee80211_bss_conf { enum nl80211_tx_power_setting txpower_type; struct ieee80211_p2p_noa_attr p2p_noa_attr; bool allow_p2p_go_ps; + u16 max_idle_period; + bool protected_keep_alive; }; /** @@ -949,6 +961,19 @@ struct ieee80211_tx_info { }; /** + * struct ieee80211_tx_status - extended tx staus info for rate control + * + * @sta: Station that the packet was transmitted for + * @info: Basic tx status information + * @skb: Packet skb (can be NULL if not provided by the driver) + */ +struct ieee80211_tx_status { + struct ieee80211_sta *sta; + struct ieee80211_tx_info *info; + struct sk_buff *skb; +}; + +/** * struct ieee80211_scan_ies - descriptors for different blocks of IEs * * This structure is used to point to different blocks of IEs in HW scan @@ -1045,16 +1070,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * (including FCS) was received. * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime * field) is valid and contains the time the SYNC preamble was received. - * @RX_FLAG_SHORTPRE: Short preamble was used for this frame - * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index - * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index - * @RX_FLAG_40MHZ: HT40 (40 MHz) was used - * @RX_FLAG_SHORT_GI: Short guard interval was used * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present. * Valid only for data frames (mainly A-MPDU) - * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if - * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT - * to hw.radiotap_mcs_details to advertise that fact * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference * number (@ampdu_reference) must be populated and be a distinct number for * each A-MPDU @@ -1067,7 +1084,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * is stored in the @ampdu_delimiter_crc field) * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was * done by the hardware - * @RX_FLAG_LDPC: LDPC was used * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without * processing it in any regular way. * This is useful if drivers offload some frames but still want to report @@ -1076,9 +1092,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * monitor interfaces. * This is useful if drivers offload some frames but still want to report * them for sniffing purposes. 
- * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 - * @RX_FLAG_10MHZ: 10 MHz (half channel) was used - * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU * subframes instead of a one huge frame for performance reasons. * All, but the last MSDU from an A-MSDU should have this flag set. E.g. @@ -1106,50 +1119,54 @@ enum mac80211_rx_flags { RX_FLAG_FAILED_FCS_CRC = BIT(5), RX_FLAG_FAILED_PLCP_CRC = BIT(6), RX_FLAG_MACTIME_START = BIT(7), - RX_FLAG_SHORTPRE = BIT(8), - RX_FLAG_HT = BIT(9), - RX_FLAG_40MHZ = BIT(10), - RX_FLAG_SHORT_GI = BIT(11), - RX_FLAG_NO_SIGNAL_VAL = BIT(12), - RX_FLAG_HT_GF = BIT(13), - RX_FLAG_AMPDU_DETAILS = BIT(14), - RX_FLAG_PN_VALIDATED = BIT(15), - RX_FLAG_DUP_VALIDATED = BIT(16), - RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), - RX_FLAG_AMPDU_IS_LAST = BIT(18), - RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), - RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20), - RX_FLAG_MACTIME_END = BIT(21), - RX_FLAG_VHT = BIT(22), - RX_FLAG_LDPC = BIT(23), - RX_FLAG_ONLY_MONITOR = BIT(24), - RX_FLAG_SKIP_MONITOR = BIT(25), - RX_FLAG_STBC_MASK = BIT(26) | BIT(27), - RX_FLAG_10MHZ = BIT(28), - RX_FLAG_5MHZ = BIT(29), - RX_FLAG_AMSDU_MORE = BIT(30), - RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), - RX_FLAG_MIC_STRIPPED = BIT_ULL(32), - RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33), - RX_FLAG_ICV_STRIPPED = BIT_ULL(34), + RX_FLAG_NO_SIGNAL_VAL = BIT(8), + RX_FLAG_AMPDU_DETAILS = BIT(9), + RX_FLAG_PN_VALIDATED = BIT(10), + RX_FLAG_DUP_VALIDATED = BIT(11), + RX_FLAG_AMPDU_LAST_KNOWN = BIT(12), + RX_FLAG_AMPDU_IS_LAST = BIT(13), + RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14), + RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15), + RX_FLAG_MACTIME_END = BIT(16), + RX_FLAG_ONLY_MONITOR = BIT(17), + RX_FLAG_SKIP_MONITOR = BIT(18), + RX_FLAG_AMSDU_MORE = BIT(19), + RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20), + RX_FLAG_MIC_STRIPPED = BIT(21), + RX_FLAG_ALLOW_SAME_PN = BIT(22), + RX_FLAG_ICV_STRIPPED = BIT(23), }; -#define RX_FLAG_STBC_SHIFT 26 - /** - * enum mac80211_rx_vht_flags - receive VHT flags + * enum mac80211_rx_encoding_flags - MCS & bandwidth flags * - * These flags are used with the @vht_flag member of - * &struct ieee80211_rx_status. - * @RX_VHT_FLAG_80MHZ: 80 MHz was used - * @RX_VHT_FLAG_160MHZ: 160 MHz was used - * @RX_VHT_FLAG_BF: packet was beamformed - */ + * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame + * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used + * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used + * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, + * if the driver fills this value it should add + * %IEEE80211_RADIOTAP_MCS_HAVE_FMT + * to hw.radiotap_mcs_details to advertise that fact + * @RX_ENC_FLAG_LDPC: LDPC was used + * @RX_ENC_FLAG_STBC_MASK: STBC 2 bit bitmask. 
1 - Nss=1, 2 - Nss=2, 3 - Nss=3 + * @RX_ENC_FLAG_BF: packet was beamformed + */ +enum mac80211_rx_encoding_flags { + RX_ENC_FLAG_SHORTPRE = BIT(0), + RX_ENC_FLAG_40MHZ = BIT(1), + RX_ENC_FLAG_SHORT_GI = BIT(2), + RX_ENC_FLAG_HT_GF = BIT(3), + RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5), + RX_ENC_FLAG_LDPC = BIT(6), + RX_ENC_FLAG_BF = BIT(7), +}; -enum mac80211_rx_vht_flags { - RX_VHT_FLAG_80MHZ = BIT(0), - RX_VHT_FLAG_160MHZ = BIT(1), - RX_VHT_FLAG_BF = BIT(2), +#define RX_ENC_FLAG_STBC_SHIFT 4 + +enum mac80211_rx_encoding { + RX_ENC_LEGACY = 0, + RX_ENC_HT, + RX_ENC_VHT, }; /** @@ -1179,9 +1196,11 @@ enum mac80211_rx_vht_flags { * @antenna: antenna used * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) - * @vht_nss: number of streams (VHT only) + * @nss: number of streams (VHT and HE only) * @flag: %RX_FLAG_\* - * @vht_flag: %RX_VHT_FLAG_\* + * @encoding: &enum mac80211_rx_encoding + * @bw: &enum rate_info_bw + * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@ -1192,11 +1211,12 @@ struct ieee80211_rx_status { u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; - u64 flag; + u32 flag; u16 freq; - u8 vht_flag; + u8 enc_flags; + u8 encoding:2, bw:3; u8 rate_idx; - u8 vht_nss; + u8 nss; u8 rx_flags; u8 band; u8 antenna; @@ -4206,6 +4226,23 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); /** + * ieee80211_tx_status_ext - extended transmit status callback + * + * This function can be used as a replacement for ieee80211_tx_status + * in drivers that may want to provide extra information that does not + * fit into &struct ieee80211_tx_info. + * + * Calls to this function for a single hardware must be synchronized + * against each other. Calls to this function, ieee80211_tx_status_ni() + * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. 
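For reference against the many driver hunks converted earlier in this diff, the ieee80211_rx_status rework replaces the old RX_FLAG_HT/VHT/40MHZ/SHORT_GI bits with the separate encoding, bw, nss and enc_flags fields. A hedged sketch of the new fill pattern, with an invented helper name:

static void mydrv_fill_rx_status(struct ieee80211_rx_status *status,
				 bool ht, bool bw40, bool sgi, u8 mcs)
{
	if (ht) {
		status->encoding = RX_ENC_HT;		/* formerly RX_FLAG_HT */
		status->rate_idx = mcs;
	}
	if (bw40)
		status->bw = RATE_INFO_BW_40;		/* formerly RX_FLAG_40MHZ */
	if (sgi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI; /* formerly RX_FLAG_SHORT_GI */
}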
+ * + * @hw: the hardware the frame was transmitted by + * @status: tx status information + */ +void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status); + +/** * ieee80211_tx_status_noskb - transmit status callback without skb * * This function can be used as a replacement for ieee80211_tx_status @@ -4221,9 +4258,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, * (NULL for multicast packets) * @info: tx status information */ -void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - struct ieee80211_tx_info *info); +static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_status status = { + .sta = sta, + .info = info, + }; + + ieee80211_tx_status_ext(hw, &status); +} /** * ieee80211_tx_status_ni - transmit status callback (in process context) @@ -5476,10 +5521,9 @@ struct rate_control_ops { void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); - void (*tx_status_noskb)(void *priv, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_info *info); + void (*tx_status_ext)(void *priv, + struct ieee80211_supported_band *sband, + void *priv_sta, struct ieee80211_tx_status *st); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e553529929f6..945a1f5f63c5 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -481,8 +481,7 @@ union bpf_attr { * u32 bpf_get_socket_uid(skb) * Get the owner uid of the socket stored inside sk_buff. * @skb: pointer to skb - * Return: uid of the socket owner on success or 0 if the socket pointer - * inside sk_buff is NULL + * Return: uid of the socket owner on success or overflowuid if failed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 633aa0276d32..8e56ac70e0d1 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -323,6 +323,7 @@ enum { IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, + IFLA_BRPORT_BCAST_FLOOD, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6095a6c4c412..b8c44b98f12d 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -387,7 +387,9 @@ * are used. Extra IEs can also be passed from the userspace by * using the %NL80211_ATTR_IE attribute. The first cycle of the * scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY - * is supplied. + * is supplied. If the device supports multiple concurrent scheduled + * scans, it will allow such when the caller provides the flag attribute + * %NL80211_ATTR_SCHED_SCAN_MULTI to indicate user-space support for it. * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if * scheduled scan is not running. The caller may assume that as soon * as the call returns, it is safe to start a new scheduled scan again. @@ -2081,6 +2083,11 @@ enum nl80211_commands { * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID. * This is used with @NL80211_CMD_SET_PMKSA. 
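As shown above, ieee80211_tx_status_noskb() is now just an inline wrapper around the new ieee80211_tx_status_ext(). A driver that does have the skb and a station pointer could report both through the extended call; this is a sketch only, with invented names, and the synchronization rules from the kernel-doc above still apply.

static void mydrv_tx_complete(struct ieee80211_hw *hw, struct sk_buff *skb,
			      struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,			/* may be NULL for multicast */
		.info = IEEE80211_SKB_CB(skb),
		.skb = skb,			/* optional, NULL if not available */
	};

	ieee80211_tx_status_ext(hw, &status);
}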
* + * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to + * indicate that it supports multiple active scheduled scan requests. + * @NL80211_ATTR_SCHED_SCAN_MAX_REQS: indicates maximum number of scheduled + * scan request that may be active for the device (u32). + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2500,6 +2507,9 @@ enum nl80211_attrs { NL80211_ATTR_PMK, + NL80211_ATTR_SCHED_SCAN_MULTI, + NL80211_ATTR_SCHED_SCAN_MAX_REQS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3184,6 +3194,7 @@ enum nl80211_reg_rule_attr { * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching, * only report BSS with matching SSID. + * (This cannot be used together with BSSID.) * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a * BSS in scan results. Filtering is turned off if not specified. Note that * if this attribute is in a match set of its own, then it is treated as @@ -3199,6 +3210,8 @@ enum nl80211_reg_rule_attr { * BSS-es in the specified band is to be adjusted before doing * RSSI-based BSS selection. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. + * @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching + * (this cannot be used together with SSID). * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter * attribute number currently defined * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use @@ -3210,6 +3223,7 @@ enum nl80211_sched_scan_match_attr { NL80211_SCHED_SCAN_MATCH_ATTR_RSSI, NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST, + NL80211_SCHED_SCAN_MATCH_ATTR_BSSID, /* keep last */ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index b4f1cb0c5ac7..6f81e0f5a0fa 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -394,27 +394,23 @@ static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) void bpf_prog_kallsyms_add(struct bpf_prog *fp) { - unsigned long flags; - if (!bpf_prog_kallsyms_candidate(fp) || !capable(CAP_SYS_ADMIN)) return; - spin_lock_irqsave(&bpf_lock, flags); + spin_lock_bh(&bpf_lock); bpf_prog_ksym_node_add(fp->aux); - spin_unlock_irqrestore(&bpf_lock, flags); + spin_unlock_bh(&bpf_lock); } void bpf_prog_kallsyms_del(struct bpf_prog *fp) { - unsigned long flags; - if (!bpf_prog_kallsyms_candidate(fp)) return; - spin_lock_irqsave(&bpf_lock, flags); + spin_lock_bh(&bpf_lock); bpf_prog_ksym_node_del(fp->aux); - spin_unlock_irqrestore(&bpf_lock, flags); + spin_unlock_bh(&bpf_lock); } static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index f3b82e0d417b..3895486ef551 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -958,8 +958,14 @@ int rhashtable_init(struct rhashtable *ht, if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); - if (params->max_size) + /* Cap total entries at 2^31 to avoid nelems overflow. 
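From user space, opting in to multiple scheduled scans only requires adding the new flag attribute to the start request. A hedged libnl-3 fragment; message construction and the remaining scan attributes are left out, and "msg" is assumed to be an already prepared NL80211_CMD_START_SCHED_SCAN message:

#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

static int advertise_sched_scan_multi(struct nl_msg *msg)
{
	/* flag attribute: its mere presence signals user-space support */
	return nla_put_flag(msg, NL80211_ATTR_SCHED_SCAN_MULTI);
}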
*/ + ht->max_elems = 1u << 31; + + if (params->max_size) { ht->p.max_size = rounddown_pow_of_two(params->max_size); + if (ht->p.max_size < ht->max_elems / 2) + ht->max_elems = ht->p.max_size * 2; + } ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 902af6ba481c..48fb17417fac 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -183,13 +183,23 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, struct net_bridge_port *p; list_for_each_entry_rcu(p, &br->port_list, list) { - /* Do not flood unicast traffic to ports that turn it off */ - if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) - continue; - /* Do not flood if mc off, except for traffic we originate */ - if (pkt_type == BR_PKT_MULTICAST && - !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) - continue; + /* Do not flood unicast traffic to ports that turn it off, nor + * other traffic if flood off, except for traffic we originate + */ + switch (pkt_type) { + case BR_PKT_UNICAST: + if (!(p->flags & BR_FLOOD)) + continue; + break; + case BR_PKT_MULTICAST: + if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) + continue; + break; + case BR_PKT_BROADCAST: + if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev) + continue; + break; + } /* Do not flood to ports that enable proxy ARP */ if (p->flags & BR_PROXYARP) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f3544d96155c..7f8d05cf9065 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -361,7 +361,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD; + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; br_init_port(p); br_set_state(p, BR_STATE_DISABLED); br_stp_port_timer_init(p); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 650986473577..a572db710d4e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -189,6 +189,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD, !!(p->flags & BR_MCAST_FLOOD)) || + nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD, + !!(p->flags & BR_BCAST_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI, !!(p->flags & BR_PROXYARP_WIFI)) || @@ -683,6 +685,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST); + br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 79aee759aba5..5d5d413a6cf8 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -173,6 +173,7 @@ BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI); BRPORT_ATTR_FLAG(multicast_flood, BR_MCAST_FLOOD); +BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -221,6 +222,7 @@ static const struct 
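To make the new rhashtable cap concrete, a small sketch (the element type and parameter values are invented): with max_size = 1024, rhashtable_init() lowers ht->max_elems from the default 1U << 31 down to 2 * 1024 = 2048, so insertions start being refused once roughly 2048 entries exist.

#include <linux/rhashtable.h>

struct example_entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_entry, key),
	.head_offset	= offsetof(struct example_entry, node),
	.max_size	= 1024,	/* power of two; caps max_elems at 2048 */
};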
brport_attribute *brport_attrs[] = { &brport_attr_proxyarp, &brport_attr_proxyarp_wifi, &brport_attr_multicast_flood, + &brport_attr_broadcast_flood, NULL }; diff --git a/net/core/dev.c b/net/core/dev.c index 3361ee87fcc2..8371a01eee87 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7245,13 +7245,10 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev, else netif_dormant_off(dev); - if (netif_carrier_ok(rootdev)) { - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } else { - if (netif_carrier_ok(dev)) - netif_carrier_off(dev); - } + if (netif_carrier_ok(rootdev)) + netif_carrier_on(dev); + else + netif_carrier_off(dev); } EXPORT_SYMBOL(netif_stacked_transfer_operstate); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index c58c1df6f92b..f21c4d3aeae0 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -440,6 +440,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[FRA_TUN_ID]) rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]); + err = -EINVAL; if (tb[FRA_L3MDEV]) { #ifdef CONFIG_NET_L3_MASTER_DEV rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]); @@ -461,7 +462,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, else rule->suppress_ifgroup = -1; - err = -EINVAL; if (tb[FRA_GOTO]) { if (rule->action != FR_ACT_GOTO) goto errout_free; @@ -592,8 +592,10 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[FRA_UID_RANGE]) { range = nla_get_kuid_range(tb); - if (!uid_range_set(&range)) + if (!uid_range_set(&range)) { + err = -EINVAL; goto errout; + } } else { range = fib_kuid_range_unset; } diff --git a/net/core/filter.c b/net/core/filter.c index 9a37860a80fc..a253a6197e6b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -98,8 +98,8 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) skb->sk = sk; pkt_len = bpf_prog_run_save_cb(filter->prog, skb); - err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; skb->sk = save_sk; + err = pkt_len ? 
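Tying the bridge changes together: with the new BR_BCAST_FLOOD port flag on by default, broadcast flooding can be switched off per port. A user-space sketch using the sysfs attribute created by BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD); the port name and the brport path layout are assumptions based on the existing unicast_flood/multicast_flood knobs:

#include <stdio.h>

int main(void)
{
	/* "eth0" is an example bridge port */
	FILE *f = fopen("/sys/class/net/eth0/brport/broadcast_flood", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* 0 = do not flood broadcasts out this port */
	return fclose(f) ? 1 : 0;
}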
pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock(); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 7e501adb5042..7f2caf71212b 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -356,11 +356,8 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * ivlen = crypto_aead_ivsize(aead); tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen); - if (!tmp) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; + if (!tmp) goto error; - } extra = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, extralen); @@ -389,7 +386,6 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - err = -ENOMEM; goto error; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 5a0e456b5d58..39bd1edee676 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1130,7 +1130,8 @@ static void fib_disable_ip(struct net_device *dev, unsigned long event, { if (fib_sync_down_dev(dev, event, force)) fib_flush(dev_net(dev)); - rt_cache_flush(dev_net(dev)); + else + rt_cache_flush(dev_net(dev)); arp_ifdown(dev); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 059dad7deefe..1e4c76d2b827 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -533,7 +533,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (tp->urg_data & TCP_URG_VALID) mask |= POLLPRI; - } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { + } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* Active TCP fastopen socket with defer_connect * Return POLLOUT so application can call write() * in order for kernel to generate SYN+data diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index cd72b3d3879e..362b8c75bfab 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -166,6 +166,7 @@ void tcp_rack_reo_timeout(struct sock *sk) u32 timeout, prior_inflight; prior_inflight = tcp_packets_in_flight(tp); + skb_mstamp_get(&tp->tcp_mstamp); tcp_rack_detect_loss(sk, &timeout); if (prior_inflight != tcp_packets_in_flight(tp)) { if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) { diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 8b55abf1c45b..1fe99ba8066c 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -330,11 +330,8 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info ivlen = crypto_aead_ivsize(aead); tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen); - if (!tmp) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; + if (!tmp) goto error; - } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); @@ -362,7 +359,6 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - err = -ENOMEM; goto error; } diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 59aba8aeac03..8b21af7321b9 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -280,12 +280,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p } if (cfg->ifname) { - dev = dev_get_by_name(net, cfg->ifname); - if (dev) { - dev_put(dev); - rc = -EEXIST; - goto out; - } strlcpy(name, cfg->ifname, IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d041f78ecee6..6c2e6060cd54 100644 --- a/net/mac80211/cfg.c +++ 
b/net/mac80211/cfg.c @@ -660,10 +660,11 @@ void sta_set_rate_info_tx(struct sta_info *sta, int shift = ieee80211_vif_get_shift(&sta->sdata->vif); u16 brate; - sband = sta->local->hw.wiphy->bands[ - ieee80211_get_sdata_band(sta->sdata)]; - brate = sband->bitrates[rate->idx].bitrate; - rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + sband = ieee80211_get_sband(sta->sdata); + if (sband) { + brate = sband->bitrates[rate->idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + } } if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_40; @@ -739,11 +740,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy, return 0; mutex_lock(&local->mtx); - mutex_lock(&local->iflist_mtx); if (local->use_chanctx) { - sdata = rcu_dereference_protected( - local->monitor_sdata, - lockdep_is_held(&local->iflist_mtx)); + sdata = rtnl_dereference(local->monitor_sdata); if (sdata) { ieee80211_vif_release_channel(sdata); ret = ieee80211_vif_use_channel(sdata, chandef, @@ -756,7 +754,6 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy, if (ret == 0) local->monitor_chandef = *chandef; - mutex_unlock(&local->iflist_mtx); mutex_unlock(&local->mtx); return ret; @@ -1257,10 +1254,11 @@ static int sta_apply_parameters(struct ieee80211_local *local, int ret = 0; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); u32 mask, set; - sband = local->hw.wiphy->bands[band]; + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; mask = params->sta_flags_mask; set = params->sta_flags_set; @@ -1393,7 +1391,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, sband, params->supported_rates, params->supported_rates_len, - &sta->sta.supp_rates[band]); + &sta->sta.supp_rates[sband->band]); } if (params->ht_capa) @@ -1409,8 +1407,8 @@ static int sta_apply_parameters(struct ieee80211_local *local, /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it */ - __ieee80211_vht_handle_opmode(sdata, sta, - params->opmode_notif, band); + __ieee80211_vht_handle_opmode(sdata, sta, params->opmode_notif, + sband->band); } if (params->support_p2p_ps >= 0) @@ -2048,13 +2046,15 @@ static int ieee80211_change_bss(struct wiphy *wiphy, struct bss_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - enum nl80211_band band; + struct ieee80211_supported_band *sband; u32 changed = 0; if (!sdata_dereference(sdata->u.ap.beacon, sdata)) return -ENOENT; - band = ieee80211_get_sdata_band(sdata); + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; if (params->use_cts_prot >= 0) { sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; @@ -2067,7 +2067,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, } if (!sdata->vif.bss_conf.use_short_slot && - band == NL80211_BAND_5GHZ) { + sband->band == NL80211_BAND_5GHZ) { sdata->vif.bss_conf.use_short_slot = true; changed |= BSS_CHANGED_ERP_SLOT; } @@ -2080,7 +2080,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, if (params->basic_rates) { ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, - wiphy->bands[band], + wiphy->bands[sband->band], params->basic_rates, params->basic_rates_len, &sdata->vif.bss_conf.basic_rates); @@ -2242,7 +2242,8 @@ ieee80211_sched_scan_start(struct wiphy *wiphy, } static int -ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device 
*dev) +ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, + u64 reqid) { struct ieee80211_local *local = wiphy_priv(wiphy); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index e957351976a2..6db09fa18269 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -992,7 +992,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, enum nl80211_band band = rx_status->band; enum nl80211_bss_scan_width scan_width; struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + struct ieee80211_supported_band *sband; bool rates_updated = false; u32 supp_rates = 0; @@ -1002,6 +1002,10 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) return; + sband = local->hw.wiphy->bands[band]; + if (WARN_ON(!sband)) + return; + rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); @@ -1014,9 +1018,9 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ scan_width = NL80211_BSS_CHAN_WIDTH_20; - if (rx_status->flag & RX_FLAG_5MHZ) + if (rx_status->bw == RATE_INFO_BW_5) scan_width = NL80211_BSS_CHAN_WIDTH_5; - if (rx_status->flag & RX_FLAG_10MHZ) + else if (rx_status->bw == RATE_INFO_BW_10) scan_width = NL80211_BSS_CHAN_WIDTH_10; sta->sta.supp_rates[band] = supp_rates | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index cf6d5abb65a3..f8f6c148f554 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1001,21 +1001,6 @@ sdata_assert_lock(struct ieee80211_sub_if_data *sdata) lockdep_assert_held(&sdata->wdev.mtx); } -static inline enum nl80211_band -ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) -{ - enum nl80211_band band = NL80211_BAND_2GHZ; - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (!WARN_ON(!chanctx_conf)) - band = chanctx_conf->def.chan->band; - rcu_read_unlock(); - - return band; -} - static inline int ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef) { @@ -1421,6 +1406,27 @@ IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev) return container_of(wdev, struct ieee80211_sub_if_data, wdev); } +static inline struct ieee80211_supported_band * +ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + enum nl80211_band band; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return NULL; + } + + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + return local->hw.wiphy->bands[band]; +} + /* this struct represents 802.11n's RA/TID combination */ struct ieee80211_ra_tid { u8 ra[ETH_ALEN]; @@ -1477,6 +1483,7 @@ struct ieee802_11_elems { const u8 *opmode_notif; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; + const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; /* length of them, respectively */ u8 ext_capab_len; @@ -1530,9 +1537,9 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) status->flag & RX_FLAG_MACTIME_END); if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END)) return true; - /* can't handle HT/VHT preamble yet */ + /* can't handle 
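The ieee80211_get_sband() helper above replaces the removed ieee80211_get_sdata_band(); the conversion pattern the rest of the patch applies throughout mac80211 is roughly this (a sketch with a hypothetical caller, assuming mac80211's internal ieee80211_i.h context):

static int example_uses_sband(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_supported_band *sband;

	sband = ieee80211_get_sband(sdata);
	if (!sband)		/* no channel context assigned yet */
		return -EINVAL;

	return sband->band == NL80211_BAND_5GHZ;
}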
non-legacy preamble yet */ if (status->flag & RX_FLAG_MACTIME_PLCP_START && - !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT))) + status->encoding != RX_ENC_LEGACY) return true; return false; } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index ae408a96c407..8aa1f5b6a051 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -253,6 +253,7 @@ static void ieee80211_restart_work(struct work_struct *work) WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); + flush_work(&local->radar_detected_work); rtnl_lock(); list_for_each_entry(sdata, &local->interfaces, list) flush_delayed_work(&sdata->dec_tailroom_needed_wk); @@ -1187,6 +1188,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) cancel_work_sync(&local->reconfig_filter); cancel_work_sync(&local->tdls_chsw_work); flush_work(&local->sched_scan_stopped_work); + flush_work(&local->radar_detected_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 281d834c7548..737e1f082b0d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -63,6 +63,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 basic_rates = 0; struct cfg80211_chan_def sta_chan_def; + struct ieee80211_supported_band *sband; /* * As support for each feature is added, check for matching @@ -83,7 +84,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) return false; - ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata), + sband = ieee80211_get_sband(sdata); + if (!sband) + return false; + + ieee80211_sta_get_rates(sdata, ie, sband->band, &basic_rates); if (sdata->vif.bss_conf.basic_rates != basic_rates) @@ -399,12 +404,13 @@ static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata, int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_local *local = sdata->local; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u8 *pos; - sband = local->hw.wiphy->bands[band]; + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + if (!sband->ht_cap.ht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || @@ -462,12 +468,13 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_local *local = sdata->local; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u8 *pos; - sband = local->hw.wiphy->bands[band]; + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + if (!sband->vht_cap.vht_supported || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || @@ -916,12 +923,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband; int err; u32 sta_flags; sdata_assert_lock(sdata); + sband = ieee80211_get_sband(sdata); + if (!sband) + return false; + sta_flags = IEEE80211_STA_DISABLE_VHT; switch 
(sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: @@ -935,7 +946,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, memset(¶ms, 0, sizeof(params)); memset(&csa_ie, 0, sizeof(csa_ie)); - err = ieee80211_parse_ch_switch_ie(sdata, elems, band, + err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band, sta_flags, sdata->vif.addr, &csa_ie); if (err < 0) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 953d71e784a9..1131cd504a15 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -95,19 +95,23 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + struct ieee80211_supported_band *sband; struct sta_info *sta; u32 erp_rates = 0, changed = 0; int i; bool short_slot = false; - if (band == NL80211_BAND_5GHZ) { + sband = ieee80211_get_sband(sdata); + if (!sband) + return changed; + + if (sband->band == NL80211_BAND_5GHZ) { /* (IEEE 802.11-2012 19.4.5) */ short_slot = true; goto out; - } else if (band != NL80211_BAND_2GHZ) + } else if (sband->band != NL80211_BAND_2GHZ) { goto out; + } for (i = 0; i < sband->n_bitrates; i++) if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G) @@ -123,7 +127,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) continue; short_slot = false; - if (erp_rates & sta->sta.supp_rates[band]) + if (erp_rates & sta->sta.supp_rates[sband->band]) short_slot = true; else break; @@ -249,7 +253,15 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.self_prot.action_code = action; if (action != WLAN_SP_MESH_PEERING_CLOSE) { - enum nl80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband; + enum nl80211_band band; + + sband = ieee80211_get_sband(sdata); + if (!sband) { + err = -EINVAL; + goto free; + } + band = sband->band; /* capability info */ pos = skb_put(skb, 2); @@ -395,13 +407,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool insert) { struct ieee80211_local *local = sdata->local; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0, changed = 0; enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; - sband = local->hw.wiphy->bands[band]; - rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates); + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + + rates = ieee80211_sta_get_rates(sdata, elems, sband->band, + &basic_rates); spin_lock_bh(&sta->mesh->plink_lock); sta->rx_stats.last_rx = jiffies; @@ -412,9 +427,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, goto out; sta->mesh->processed_beacon = true; - if (sta->sta.supp_rates[band] != rates) + if (sta->sta.supp_rates[sband->band] != rates) changed |= IEEE80211_RC_SUPP_RATES_CHANGED; - sta->sta.supp_rates[band] = rates; + sta->sta.supp_rates[sband->band] = rates; if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, sta)) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 24d69bcf71ad..89dff563b1ec 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -6,7 +6,7 @@ * Copyright 2006-2007 Jiri Benc <[email protected]> * Copyright 2007, Michael Wu <[email 
protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1855,11 +1855,16 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, u16 capab, bool erp_valid, u8 erp) { struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + struct ieee80211_supported_band *sband; u32 changed = 0; bool use_protection; bool use_short_preamble; bool use_short_slot; + sband = ieee80211_get_sband(sdata); + if (!sband) + return changed; + if (erp_valid) { use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0; use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0; @@ -1869,7 +1874,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, } use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); - if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ) + if (sband->band == NL80211_BAND_5GHZ) use_short_slot = true; if (use_protection != bss_conf->use_cts_prot) { @@ -3004,7 +3009,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, goto out; } - sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; + sband = ieee80211_get_sband(sdata); + if (!sband) { + mutex_unlock(&sdata->local->sta_mtx); + ret = false; + goto out; + } /* Set up internal HT/VHT capabilities */ if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) @@ -3088,6 +3098,18 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } changed |= BSS_CHANGED_QOS; + if (elems.max_idle_period_ie) { + bss_conf->max_idle_period = + le16_to_cpu(elems.max_idle_period_ie->max_idle_period); + bss_conf->protected_keep_alive = + !!(elems.max_idle_period_ie->idle_options & + WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE); + changed |= BSS_CHANGED_KEEP_ALIVE; + } else { + bss_conf->max_idle_period = 0; + bss_conf->protected_keep_alive = false; + } + /* set AID and assoc capability, * ieee80211_set_associated() will tell the driver */ bss_conf->aid = aid; diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 76a8bcd8ef11..a87d195c4a61 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -10,7 +10,7 @@ static void ieee80211_sched_scan_cancel(struct ieee80211_local *local) { if (ieee80211_request_sched_scan_stop(local)) return; - cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy); + cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); } int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 9d7a1cd949fb..ea1f4315c521 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -62,6 +62,28 @@ void rate_control_rate_init(struct sta_info *sta) set_sta_flag(sta, WLAN_STA_RATE_CONTROL); } +void rate_control_tx_status(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct ieee80211_tx_status *st) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct sta_info *sta = container_of(st->sta, struct sta_info, sta); + void *priv_sta = sta->rate_ctrl_priv; + + if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + return; + + spin_lock_bh(&sta->rate_ctrl_lock); + if (ref->ops->tx_status_ext) + ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st); + else if (st->skb) + ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb); + else + WARN_ON_ONCE(1); + 
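A driver consuming the new keep-alive parameters would typically do so from its bss_info_changed() callback; a sketch, where the firmware hook is hypothetical:

#include <net/mac80211.h>

/* hypothetical firmware hook, not a real mac80211 or driver symbol */
static void drv_fw_set_keep_alive(struct ieee80211_hw *hw, u16 max_idle_period,
				  bool protected_ka);

static void drv_bss_info_changed(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *info,
				 u32 changed)
{
	if (changed & BSS_CHANGED_KEEP_ALIVE)
		drv_fw_set_keep_alive(hw, info->max_idle_period,
				      info->protected_keep_alive);
}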
+ spin_unlock_bh(&sta->rate_ctrl_lock); +} + void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct sta_info *sta, u32 changed) @@ -904,7 +926,9 @@ int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta_rates *old; struct ieee80211_supported_band *sband; - sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)]; + sband = ieee80211_get_sband(sta->sdata); + if (!sband) + return -EINVAL; rate_control_apply_mask_ratetbl(sta, sband, rates); /* * mac80211 guarantees that this function will not be called diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index f7825ef5f871..8212bfeb71d6 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -28,47 +28,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_tx_rate_control *txrc); -static inline void rate_control_tx_status(struct ieee80211_local *local, - struct ieee80211_supported_band *sband, - struct sta_info *sta, - struct sk_buff *skb) -{ - struct rate_control_ref *ref = local->rate_ctrl; - struct ieee80211_sta *ista = &sta->sta; - void *priv_sta = sta->rate_ctrl_priv; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) - return; - - spin_lock_bh(&sta->rate_ctrl_lock); - if (ref->ops->tx_status) - ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); - else - ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); - spin_unlock_bh(&sta->rate_ctrl_lock); -} - -static inline void -rate_control_tx_status_noskb(struct ieee80211_local *local, - struct ieee80211_supported_band *sband, - struct sta_info *sta, - struct ieee80211_tx_info *info) -{ - struct rate_control_ref *ref = local->rate_ctrl; - struct ieee80211_sta *ista = &sta->sta; - void *priv_sta = sta->rate_ctrl_priv; - - if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) - return; - - if (WARN_ON_ONCE(!ref->ops->tx_status_noskb)) - return; - - spin_lock_bh(&sta->rate_ctrl_lock); - ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); - spin_unlock_bh(&sta->rate_ctrl_lock); -} +void rate_control_tx_status(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct ieee80211_tx_status *st); void rate_control_rate_init(struct sta_info *sta); void rate_control_rate_update(struct ieee80211_local *local, diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3ebe4405a2d4..9766c1cc4b0a 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -264,9 +264,9 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) static void minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_info *info) + void *priv_sta, struct ieee80211_tx_status *st) { + struct ieee80211_tx_info *info = st->info; struct minstrel_priv *mp = priv; struct minstrel_sta_info *mi = priv_sta; struct ieee80211_tx_rate *ar = info->status.rates; @@ -726,7 +726,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) const struct rate_control_ops mac80211_minstrel = { .name = "minstrel", - .tx_status_noskb = minstrel_tx_status, + .tx_status_ext = minstrel_tx_status, .get_rate = minstrel_get_rate, .rate_init = minstrel_rate_init, .alloc = minstrel_alloc, diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 8e783e197e93..4a5bdad9f303 100644 --- 
a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -678,9 +678,9 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) static void minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_info *info) + void *priv_sta, struct ieee80211_tx_status *st) { + struct ieee80211_tx_info *info = st->info; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_tx_rate *ar = info->status.rates; @@ -690,8 +690,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, int i; if (!msp->is_ht) - return mac80211_minstrel.tx_status_noskb(priv, sband, sta, - &msp->legacy, info); + return mac80211_minstrel.tx_status_ext(priv, sband, + &msp->legacy, st); /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && @@ -1374,7 +1374,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) static const struct rate_control_ops mac80211_minstrel_ht = { .name = "minstrel_ht", - .tx_status_noskb = minstrel_ht_tx_status, + .tx_status_ext = minstrel_ht_tx_status, .get_rate = minstrel_ht_get_rate, .rate_init = minstrel_ht_rate_init, .rate_update = minstrel_ht_rate_update, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 2142074d9fb0..35f4c7d7a500 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -156,7 +156,7 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, /* padding for RX_FLAGS if necessary */ len = ALIGN(len, 2); - if (status->flag & RX_FLAG_HT) /* HT info */ + if (status->encoding == RX_ENC_HT) /* HT info */ len += 3; if (status->flag & RX_FLAG_AMPDU_DETAILS) { @@ -164,7 +164,7 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, len += 8; } - if (status->flag & RX_FLAG_VHT) { + if (status->encoding == RX_ENC_VHT) { len = ALIGN(len, 2); len += 12; } @@ -329,12 +329,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos |= IEEE80211_RADIOTAP_F_FCS; if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) *pos |= IEEE80211_RADIOTAP_F_BADFCS; - if (status->flag & RX_FLAG_SHORTPRE) + if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; pos++; /* IEEE80211_RADIOTAP_RATE */ - if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) { + if (!rate || status->encoding != RX_ENC_LEGACY) { /* * Without rate information don't add it. 
If we have, * MCS information is a separate field in radiotap, @@ -345,9 +345,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } else { int shift = 0; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); - if (status->flag & RX_FLAG_10MHZ) + if (status->bw == RATE_INFO_BW_10) shift = 1; - else if (status->flag & RX_FLAG_5MHZ) + else if (status->bw == RATE_INFO_BW_5) shift = 2; *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); } @@ -356,14 +356,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, /* IEEE80211_RADIOTAP_CHANNEL */ put_unaligned_le16(status->freq, pos); pos += 2; - if (status->flag & RX_FLAG_10MHZ) + if (status->bw == RATE_INFO_BW_10) channel_flags |= IEEE80211_CHAN_HALF; - else if (status->flag & RX_FLAG_5MHZ) + else if (status->bw == RATE_INFO_BW_5) channel_flags |= IEEE80211_CHAN_QUARTER; if (status->band == NL80211_BAND_5GHZ) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; - else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) + else if (status->encoding != RX_ENC_LEGACY) channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; else if (rate && rate->flags & IEEE80211_RATE_ERP_G) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; @@ -402,21 +402,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, put_unaligned_le16(rx_flags, pos); pos += 2; - if (status->flag & RX_FLAG_HT) { + if (status->encoding == RX_ENC_HT) { unsigned int stbc; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); *pos++ = local->hw.radiotap_mcs_details; *pos = 0; - if (status->flag & RX_FLAG_SHORT_GI) + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_MCS_SGI; - if (status->flag & RX_FLAG_40MHZ) + if (status->bw == RATE_INFO_BW_40) *pos |= IEEE80211_RADIOTAP_MCS_BW_40; - if (status->flag & RX_FLAG_HT_GF) + if (status->enc_flags & RX_ENC_FLAG_HT_GF) *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; - if (status->flag & RX_FLAG_LDPC) + if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; - stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; + stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; pos++; *pos++ = status->rate_idx; @@ -449,35 +449,40 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos++ = 0; } - if (status->flag & RX_FLAG_VHT) { + if (status->encoding == RX_ENC_VHT) { u16 known = local->hw.radiotap_vht_details; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); put_unaligned_le16(known, pos); pos += 2; /* flags */ - if (status->flag & RX_FLAG_SHORT_GI) + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; /* in VHT, STBC is binary */ - if (status->flag & RX_FLAG_STBC_MASK) + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; - if (status->vht_flag & RX_VHT_FLAG_BF) + if (status->enc_flags & RX_ENC_FLAG_BF) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; pos++; /* bandwidth */ - if (status->vht_flag & RX_VHT_FLAG_80MHZ) + switch (status->bw) { + case RATE_INFO_BW_80: *pos++ = 4; - else if (status->vht_flag & RX_VHT_FLAG_160MHZ) + break; + case RATE_INFO_BW_160: *pos++ = 11; - else if (status->flag & RX_FLAG_40MHZ) + break; + case RATE_INFO_BW_40: *pos++ = 1; - else /* 20 MHz */ + break; + default: *pos++ = 0; + } /* MCS/NSS */ - *pos = (status->rate_idx << 4) | status->vht_nss; + *pos = (status->rate_idx << 4) | status->nss; pos += 4; /* coding field */ - if 
(status->flag & RX_FLAG_LDPC) + if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; pos++; /* group ID */ @@ -533,6 +538,59 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } } +static struct sk_buff * +ieee80211_make_monitor_skb(struct ieee80211_local *local, + struct sk_buff **origskb, + struct ieee80211_rate *rate, + int rtap_vendor_space, bool use_origskb) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); + int rt_hdrlen, needed_headroom; + struct sk_buff *skb; + + /* room for the radiotap header based on driver features */ + rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); + needed_headroom = rt_hdrlen - rtap_vendor_space; + + if (use_origskb) { + /* only need to expand headroom if necessary */ + skb = *origskb; + *origskb = NULL; + + /* + * This shouldn't trigger often because most devices have an + * RX header they pull before we get here, and that should + * be big enough for our radiotap information. We should + * probably export the length to drivers so that we can have + * them allocate enough headroom to start with. + */ + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return NULL; + } + } else { + /* + * Need to make a copy and possibly remove radiotap header + * and FCS from the original. + */ + skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); + + if (!skb) + return NULL; + } + + /* prepend radiotap information */ + ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + return skb; +} + /* * This function copies a received frame to all monitor interfaces and * returns a cleaned-up SKB that no longer includes the FCS nor the @@ -544,13 +602,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); struct ieee80211_sub_if_data *sdata; - int rt_hdrlen, needed_headroom; - struct sk_buff *skb, *skb2; - struct net_device *prev_dev = NULL; + struct sk_buff *monskb = NULL; int present_fcs_len = 0; unsigned int rtap_vendor_space = 0; struct ieee80211_sub_if_data *monitor_sdata = rcu_dereference(local->monitor_sdata); + bool only_monitor = false; if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data; @@ -583,9 +640,11 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, return NULL; } + only_monitor = should_drop_frame(origskb, present_fcs_len, + rtap_vendor_space); + if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { - if (should_drop_frame(origskb, present_fcs_len, - rtap_vendor_space)) { + if (only_monitor) { dev_kfree_skb(origskb); return NULL; } @@ -597,67 +656,46 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space); - /* room for the radiotap header based on driver features */ - rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); - needed_headroom = rt_hdrlen - rtap_vendor_space; - - if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) { - /* only need to expand headroom if necessary */ - skb = origskb; - origskb = NULL; - - /* - * This shouldn't trigger often because most devices have an - * RX header they 
pull before we get here, and that should - * be big enough for our radiotap information. We should - * probably export the length to drivers so that we can have - * them allocate enough headroom to start with. - */ - if (skb_headroom(skb) < needed_headroom && - pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - return NULL; - } - } else { - /* - * Need to make a copy and possibly remove radiotap header - * and FCS from the original. - */ - skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); - remove_monitor_info(origskb, present_fcs_len, - rtap_vendor_space); - - if (!skb) - return origskb; - } - - /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); - - skb_reset_mac_header(skb); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { - if (prev_dev) { - skb2 = skb_clone(skb, GFP_ATOMIC); - if (skb2) { - skb2->dev = prev_dev; - netif_receive_skb(skb2); + bool last_monitor = list_is_last(&sdata->u.mntr.list, + &local->mon_list); + + if (!monskb) + monskb = ieee80211_make_monitor_skb(local, &origskb, + rate, + rtap_vendor_space, + only_monitor && + last_monitor); + + if (monskb) { + struct sk_buff *skb; + + if (last_monitor) { + skb = monskb; + monskb = NULL; + } else { + skb = skb_clone(monskb, GFP_ATOMIC); + } + + if (skb) { + skb->dev = sdata->dev; + ieee80211_rx_stats(skb->dev, skb->len); + netif_receive_skb(skb); } } - prev_dev = sdata->dev; - ieee80211_rx_stats(sdata->dev, skb->len); + if (last_monitor) + break; } - if (prev_dev) { - skb->dev = prev_dev; - netif_receive_skb(skb); - } else - dev_kfree_skb(skb); + /* this happens if last_monitor was erroneously false */ + dev_kfree_skb(monskb); + /* ditto */ + if (!origskb) + return NULL; + + remove_monitor_info(origskb, present_fcs_len, rtap_vendor_space); return origskb; } @@ -3303,8 +3341,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, status = IEEE80211_SKB_RXCB((rx->skb)); sband = rx->local->hw.wiphy->bands[status->band]; - if (!(status->flag & RX_FLAG_HT) && - !(status->flag & RX_FLAG_VHT)) + if (!(status->encoding == RX_ENC_HT) && + !(status->encoding == RX_ENC_VHT)) rate = &sband->bitrates[status->rate_idx]; ieee80211_rx_cooked_monitor(rx, rate); @@ -3541,7 +3579,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); - int multicast = is_multicast_ether_addr(hdr->addr1); + bool multicast = is_multicast_ether_addr(hdr->addr1); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: @@ -3565,7 +3603,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; if (!rx->sta) { int rate_idx; - if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) + if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT/VHT rates */ else rate_idx = status->rate_idx; @@ -3585,7 +3623,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; if (!rx->sta) { int rate_idx; - if (status->flag & RX_FLAG_HT) + if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT rates */ else rate_idx = status->rate_idx; @@ -4248,7 +4286,8 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, * we probably can't have a valid rate here anyway. 
*/ - if (status->flag & RX_FLAG_HT) { + switch (status->encoding) { + case RX_ENC_HT: /* * rate_idx is MCS index, which can be [0-76] * as documented on: @@ -4266,14 +4305,19 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rate_idx, status->rate_idx)) goto drop; - } else if (status->flag & RX_FLAG_VHT) { + break; + case RX_ENC_VHT: if (WARN_ONCE(status->rate_idx > 9 || - !status->vht_nss || - status->vht_nss > 8, + !status->nss || + status->nss > 8, "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", - status->rate_idx, status->vht_nss)) + status->rate_idx, status->nss)) goto drop; - } else { + break; + default: + WARN_ON_ONCE(1); + /* fall through */ + case RX_ENC_LEGACY: if (WARN_ON(status->rate_idx >= sband->n_bitrates)) goto drop; rate = &sband->bitrates[status->rate_idx]; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index faab3c490d2b..47d2ed570470 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -79,9 +79,9 @@ ieee80211_bss_info_update(struct ieee80211_local *local, bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20; - if (rx_status->flag & RX_FLAG_5MHZ) + if (rx_status->bw == RATE_INFO_BW_5) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5; - if (rx_status->flag & RX_FLAG_10MHZ) + else if (rx_status->bw == RATE_INFO_BW_10) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; bss_meta.chan = channel; @@ -174,8 +174,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local, if (beacon) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[rx_status->band]; - if (!(rx_status->flag & RX_FLAG_HT) && - !(rx_status->flag & RX_FLAG_VHT)) + if (!(rx_status->encoding == RX_ENC_HT) && + !(rx_status->encoding == RX_ENC_VHT)) bss->beacon_rate = &sband->bitrates[rx_status->rate_idx]; } @@ -1219,7 +1219,7 @@ void ieee80211_sched_scan_results(struct ieee80211_hw *hw) trace_api_sched_scan_results(local); - cfg80211_sched_scan_results(hw->wiphy); + cfg80211_sched_scan_results(hw->wiphy, 0); } EXPORT_SYMBOL(ieee80211_sched_scan_results); @@ -1239,7 +1239,7 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local) mutex_unlock(&local->mtx); - cfg80211_sched_scan_stopped(local->hw.wiphy); + cfg80211_sched_scan_stopped(local->hw.wiphy, 0); } void ieee80211_sched_scan_stopped_work(struct work_struct *work) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 81ec1f72518d..7cdf7a835bb0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2,7 +2,7 @@ * Copyright 2002-2005, Instant802 Networks, Inc. 
* Copyright 2006-2007 Jiri Benc <[email protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -395,10 +395,15 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sta.smps_mode = IEEE80211_SMPS_OFF; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { - struct ieee80211_supported_band *sband = - hw->wiphy->bands[ieee80211_get_sdata_band(sdata)]; - u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> - IEEE80211_HT_CAP_SM_PS_SHIFT; + struct ieee80211_supported_band *sband; + u8 smps; + + sband = ieee80211_get_sband(sdata); + if (!sband) + goto free_txq; + + smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> + IEEE80211_HT_CAP_SM_PS_SHIFT; /* * Assume that hostapd advertises our caps in the beacon and * this is the known_smps_mode for a station that just assciated @@ -1957,27 +1962,32 @@ sta_get_last_rx_stats(struct sta_info *sta) static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, struct rate_info *rinfo) { - rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >> - STA_STATS_RATE_BW_SHIFT; + rinfo->bw = STA_STATS_GET(BW, rate); - switch (rate & STA_STATS_RATE_TYPE_MASK) { + switch (STA_STATS_GET(TYPE, rate)) { case STA_STATS_RATE_TYPE_VHT: rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; - rinfo->mcs = rate & 0xf; - rinfo->nss = (rate & 0xf0) >> 4; + rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); + rinfo->nss = STA_STATS_GET(VHT_NSS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case STA_STATS_RATE_TYPE_HT: rinfo->flags = RATE_INFO_FLAGS_MCS; - rinfo->mcs = rate & 0xff; + rinfo->mcs = STA_STATS_GET(HT_MCS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case STA_STATS_RATE_TYPE_LEGACY: { struct ieee80211_supported_band *sband; u16 brate; unsigned int shift; + int band = STA_STATS_GET(LEGACY_BAND, rate); + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); rinfo->flags = 0; - sband = local->hw.wiphy->bands[(rate >> 4) & 0xf]; - brate = sband->bitrates[rate & 0xf].bitrate; + sband = local->hw.wiphy->bands[band]; + brate = sband->bitrates[rate_idx].bitrate; if (rinfo->bw == RATE_INFO_BW_5) shift = 2; else if (rinfo->bw == RATE_INFO_BW_10) @@ -1988,9 +1998,6 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, break; } } - - if (rate & STA_STATS_RATE_SGI) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; } static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 8949266d7bc3..5609cacb20d5 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -1,7 +1,7 @@ /* * Copyright 2002-2005, Devicescape Software, Inc. 
* Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,6 +16,7 @@ #include <linux/if_ether.h> #include <linux/workqueue.h> #include <linux/average.h> +#include <linux/bitfield.h> #include <linux/etherdevice.h> #include <linux/rhashtable.h> #include <linux/u64_stats_sync.h> @@ -727,41 +728,55 @@ void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); unsigned long ieee80211_sta_last_active(struct sta_info *sta); +enum sta_stats_type { + STA_STATS_RATE_TYPE_INVALID = 0, + STA_STATS_RATE_TYPE_LEGACY, + STA_STATS_RATE_TYPE_HT, + STA_STATS_RATE_TYPE_VHT, +}; + +#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) +#define STA_STATS_FIELD_LEGACY_IDX GENMASK( 3, 0) +#define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) +#define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_BW GENMASK(11, 8) +#define STA_STATS_FIELD_SGI GENMASK(12, 12) +#define STA_STATS_FIELD_TYPE GENMASK(15, 13) + +#define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) +#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) + #define STA_STATS_RATE_INVALID 0 -#define STA_STATS_RATE_TYPE_MASK 0xC000 -#define STA_STATS_RATE_TYPE_LEGACY 0x4000 -#define STA_STATS_RATE_TYPE_HT 0x8000 -#define STA_STATS_RATE_TYPE_VHT 0xC000 -#define STA_STATS_RATE_SGI 0x1000 -#define STA_STATS_RATE_BW_SHIFT 9 -#define STA_STATS_RATE_BW_MASK (0x7 << STA_STATS_RATE_BW_SHIFT) - -static inline u16 sta_stats_encode_rate(struct ieee80211_rx_status *s) + +static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) { - u16 r = s->rate_idx; - - if (s->vht_flag & RX_VHT_FLAG_80MHZ) - r |= RATE_INFO_BW_80 << STA_STATS_RATE_BW_SHIFT; - else if (s->vht_flag & RX_VHT_FLAG_160MHZ) - r |= RATE_INFO_BW_160 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_40MHZ) - r |= RATE_INFO_BW_40 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_10MHZ) - r |= RATE_INFO_BW_10 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_5MHZ) - r |= RATE_INFO_BW_5 << STA_STATS_RATE_BW_SHIFT; - else - r |= RATE_INFO_BW_20 << STA_STATS_RATE_BW_SHIFT; - - if (s->flag & RX_FLAG_SHORT_GI) - r |= STA_STATS_RATE_SGI; - - if (s->flag & RX_FLAG_VHT) - r |= STA_STATS_RATE_TYPE_VHT | (s->vht_nss << 4); - else if (s->flag & RX_FLAG_HT) - r |= STA_STATS_RATE_TYPE_HT; - else - r |= STA_STATS_RATE_TYPE_LEGACY | (s->band << 4); + u16 r; + + r = STA_STATS_FIELD(BW, s->bw); + + if (s->enc_flags & RX_ENC_FLAG_SHORT_GI) + r |= STA_STATS_FIELD(SGI, 1); + + switch (s->encoding) { + case RX_ENC_VHT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_VHT); + r |= STA_STATS_FIELD(VHT_NSS, s->nss); + r |= STA_STATS_FIELD(VHT_MCS, s->rate_idx); + break; + case RX_ENC_HT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HT); + r |= STA_STATS_FIELD(HT_MCS, s->rate_idx); + break; + case RX_ENC_LEGACY: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_LEGACY); + r |= STA_STATS_FIELD(LEGACY_BAND, s->band); + r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); + break; + default: + WARN_ON(1); + return STA_STATS_RATE_INVALID; + } return r; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 83b8b11f24ea..be47ac5cd8c8 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -200,6 +200,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, 
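A quick illustration (not part of the patch) of why the GENMASK/FIELD_PREP form is convenient: whatever STA_STATS_FIELD() packs, STA_STATS_GET() recovers, so the encode and decode sides can no longer drift apart. Example values are arbitrary, and the sketch assumes mac80211's sta_info.h context:

static inline void sta_stats_rate_roundtrip_example(void)
{
	u32 r = STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_VHT) |
		STA_STATS_FIELD(VHT_MCS, 7) |
		STA_STATS_FIELD(VHT_NSS, 2) |
		STA_STATS_FIELD(BW, RATE_INFO_BW_80) |
		STA_STATS_FIELD(SGI, 1);

	WARN_ON(STA_STATS_GET(TYPE, r) != STA_STATS_RATE_TYPE_VHT);
	WARN_ON(STA_STATS_GET(VHT_MCS, r) != 7);
	WARN_ON(STA_STATS_GET(VHT_NSS, r) != 2);
	WARN_ON(STA_STATS_GET(BW, r) != RATE_INFO_BW_80);
}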
struct sk_buff *skb) } if (ieee80211_is_action(mgmt->frame_control) && + !ieee80211_has_protected(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_HT && mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && ieee80211_sdata_running(sdata)) { @@ -630,61 +631,6 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, return rates_idx; } -void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, - struct ieee80211_sta *pubsta, - struct ieee80211_tx_info *info) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_supported_band *sband; - int retry_count; - bool acked, noack_success; - - ieee80211_tx_get_rates(hw, info, &retry_count); - - sband = hw->wiphy->bands[info->band]; - - acked = !!(info->flags & IEEE80211_TX_STAT_ACK); - noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); - - if (pubsta) { - struct sta_info *sta; - - sta = container_of(pubsta, struct sta_info, sta); - - if (!acked) - sta->status_stats.retry_failed++; - sta->status_stats.retry_count += retry_count; - - if (acked) { - sta->status_stats.last_ack = jiffies; - - if (sta->status_stats.lost_packets) - sta->status_stats.lost_packets = 0; - - /* Track when last TDLS packet was ACKed */ - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->status_stats.last_tdls_pkt_time = jiffies; - } else { - ieee80211_lost_packet(sta, info); - } - - rate_control_tx_status_noskb(local, sband, sta, info); - } - - if (acked || noack_success) { - I802_DEBUG_INC(local->dot11TransmittedFrameCount); - if (!pubsta) - I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); - if (retry_count > 0) - I802_DEBUG_INC(local->dot11RetryCount); - if (retry_count > 1) - I802_DEBUG_INC(local->dot11MultipleRetryCount); - } else { - I802_DEBUG_INC(local->dot11FailedCount); - } -} -EXPORT_SYMBOL(ieee80211_tx_status_noskb); - void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, int retry_count, int shift, bool send_to_cooked) @@ -742,15 +688,16 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, dev_kfree_skb(skb); } -void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +static void __ieee80211_tx_status(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status) { + struct sk_buff *skb = status->skb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info = status->info; + struct sta_info *sta; __le16 fc; struct ieee80211_supported_band *sband; - struct rhlist_head *tmp; - struct sta_info *sta; int retry_count; int rates_idx; bool send_to_cooked; @@ -761,16 +708,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); - rcu_read_lock(); - sband = local->hw.wiphy->bands[info->band]; fc = hdr->frame_control; - for_each_sta_info(local, hdr->addr1, sta, tmp) { - /* skip wrong virtual interface */ - if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) - continue; - + if (status->sta) { + sta = container_of(status->sta, struct sta_info, sta); shift = ieee80211_vif_get_shift(&sta->sdata->vif); if (info->flags & IEEE80211_TX_STATUS_EOSP) @@ -790,7 +732,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) * that this TX packet failed because of that. 
*/ ieee80211_handle_filtered_frame(local, sta, skb); - rcu_read_unlock(); return; } @@ -840,7 +781,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { ieee80211_handle_filtered_frame(local, sta, skb); - rcu_read_unlock(); return; } else { if (!acked) @@ -856,7 +796,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } } - rate_control_tx_status(local, sband, sta, skb); + rate_control_tx_status(local, sband, status); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) ieee80211s_update_metric(local, sta, skb); @@ -883,8 +823,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } } - rcu_read_unlock(); - ieee80211_led_tx(local); /* SNMP counters @@ -949,8 +887,96 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) /* send to monitor interfaces */ ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked); } + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_status status = { + .skb = skb, + .info = IEEE80211_SKB_CB(skb), + }; + struct rhlist_head *tmp; + struct sta_info *sta; + + rcu_read_lock(); + + for_each_sta_info(local, hdr->addr1, sta, tmp) { + /* skip wrong virtual interface */ + if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) + continue; + + status.sta = &sta->sta; + break; + } + + __ieee80211_tx_status(hw, &status); + rcu_read_unlock(); +} EXPORT_SYMBOL(ieee80211_tx_status); +void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = status->info; + struct ieee80211_sta *pubsta = status->sta; + struct ieee80211_supported_band *sband; + int retry_count; + bool acked, noack_success; + + if (status->skb) + return __ieee80211_tx_status(hw, status); + + if (!status->sta) + return; + + ieee80211_tx_get_rates(hw, info, &retry_count); + + sband = hw->wiphy->bands[info->band]; + + acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); + + if (pubsta) { + struct sta_info *sta; + + sta = container_of(pubsta, struct sta_info, sta); + + if (!acked) + sta->status_stats.retry_failed++; + sta->status_stats.retry_count += retry_count; + + if (acked) { + sta->status_stats.last_ack = jiffies; + + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; + + /* Track when last TDLS packet was ACKed */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + sta->status_stats.last_tdls_pkt_time = jiffies; + } else { + ieee80211_lost_packet(sta, info); + } + + rate_control_tx_status(local, sband, status); + } + + if (acked || noack_success) { + I802_DEBUG_INC(local->dot11TransmittedFrameCount); + if (!pubsta) + I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); + if (retry_count > 0) + I802_DEBUG_INC(local->dot11RetryCount); + if (retry_count > 1) + I802_DEBUG_INC(local->dot11MultipleRetryCount); + } else { + I802_DEBUG_INC(local->dot11FailedCount); + } +} +EXPORT_SYMBOL(ieee80211_tx_status_ext); + void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index afca7d103684..f20dcf1b1830 100644 --- a/net/mac80211/tdls.c +++ 
b/net/mac80211/tdls.c @@ -47,8 +47,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata); bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = (void *)skb_put(skb, 10); @@ -180,11 +179,14 @@ static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, u16 status_code) { + struct ieee80211_supported_band *sband; + /* The capability will be 0 when sending a failure code */ if (status_code != 0) return 0; - if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) { + sband = ieee80211_get_sband(sdata); + if (sband && sband->band == NL80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; } @@ -358,17 +360,20 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { - enum nl80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; + struct ieee80211_local *local = sdata->local; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct sta_info *sta = NULL; size_t offset = 0, noffset; u8 *pos; - ieee80211_add_srates_ie(sdata, skb, false, band); - ieee80211_add_ext_srates_ie(sdata, skb, false, band); + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + + ieee80211_add_srates_ie(sdata, skb, false, sband->band); + ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band); ieee80211_tdls_add_supp_channels(sdata, skb); /* add any custom IEs that go before Extended Capabilities */ @@ -439,7 +444,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, * the same on all bands. The specification limits the setup to a * single HT-cap, so use the current band for now. 
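With ieee80211_tx_status_noskb() folded into the new ieee80211_tx_status_ext() in status.c above, a driver reports TX status through one struct whether or not it still holds the frame. A sketch of the no-skb path from a hypothetical driver's completion handler; only the struct and field names come from this patch:

        struct ieee80211_tx_status status = {
                .sta = pubsta,          /* may be NULL for no-station frames */
                .info = &tx_info,       /* rates and IEEE80211_TX_STAT_* flags filled in */
                /* .skb left NULL: takes the path that used to be tx_status_noskb */
        };

        ieee80211_tx_status_ext(hw, &status);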
*/ - sband = local->hw.wiphy->bands[band]; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || @@ -545,9 +549,13 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband; u8 *pos; + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); @@ -612,7 +620,8 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ - if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { + if (sband->band != NL80211_BAND_2GHZ && + sta->sta.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f27719eeeed7..04b22f8982fe 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4297,7 +4297,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, return bcn; shift = ieee80211_vif_get_shift(vif); - sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))]; + sband = ieee80211_get_sband(vif_to_sdata(vif)); + if (!sband) + return bcn; + ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false); return bcn; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 7a37ce78bb38..ac9ac6c35594 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc <[email protected]> * Copyright 2007 Johannes Berg <[email protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015-2016 Intel Deutschland GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -828,6 +828,7 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, case WLAN_EID_EXT_CAPABILITY: case WLAN_EID_CHAN_SWITCH_TIMING: case WLAN_EID_LINK_ID: + case WLAN_EID_BSS_MAX_IDLE_PERIOD: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once @@ -1089,6 +1090,10 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, else elem_parse_failed = true; break; + case WLAN_EID_BSS_MAX_IDLE_PERIOD: + if (elen >= sizeof(*elems->max_idle_period_ie)) + elems->max_idle_period_ie = (void *)pos; + break; default: break; } @@ -1590,14 +1595,14 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, size_t num_rates; u32 supp_rates, rate_flags; int i, j, shift; + sband = sdata->local->hw.wiphy->bands[band]; + if (WARN_ON(!sband)) + return 1; rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); - if (WARN_ON(!sband)) - return 1; - num_rates = sband->n_bitrates; supp_rates = 0; for (i = 0; i < elems->supp_rates_len + @@ -1983,6 +1988,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->u.mgd.have_beacon) changed |= BSS_CHANGED_BEACON_INFO; + if (sdata->vif.bss_conf.max_idle_period || + sdata->vif.bss_conf.protected_keep_alive) + changed |= BSS_CHANGED_KEEP_ALIVE; + sdata_lock(sdata); 
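The util.c hunks above parse the BSS Max Idle Period element into elems->max_idle_period_ie and re-announce BSS_CHANGED_KEEP_ALIVE on reconfig whenever bss_conf carries a max_idle_period or protected_keep_alive setting. A rough sketch of how a driver might consume that notification; the driver private struct and configure_keepalive() helper are hypothetical, only the bss_conf fields and the change flag come from this series:

        static void drv_bss_info_changed(struct ieee80211_hw *hw,
                                         struct ieee80211_vif *vif,
                                         struct ieee80211_bss_conf *info,
                                         u32 changed)
        {
                struct drv_priv *priv = hw->priv;

                if (changed & BSS_CHANGED_KEEP_ALIVE)
                        /* period as advertised by the AP; protected_keep_alive
                         * selects protected vs. unprotected keep-alive frames
                         */
                        configure_keepalive(priv, info->max_idle_period,
                                            info->protected_keep_alive);
        }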
ieee80211_bss_info_change_notify(sdata, changed); sdata_unlock(sdata); @@ -2103,7 +2112,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->mtx); if (sched_scan_stopped) - cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy); + cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); wake_up: if (local->in_reconfig) { @@ -2715,42 +2724,39 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, memset(&ri, 0, sizeof(ri)); /* Fill cfg80211 rate info */ - if (status->flag & RX_FLAG_HT) { + switch (status->encoding) { + case RX_ENC_HT: ri.mcs = status->rate_idx; ri.flags |= RATE_INFO_FLAGS_MCS; - if (status->flag & RX_FLAG_40MHZ) - ri.bw = RATE_INFO_BW_40; - else - ri.bw = RATE_INFO_BW_20; - if (status->flag & RX_FLAG_SHORT_GI) + ri.bw = status->bw; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (status->flag & RX_FLAG_VHT) { + break; + case RX_ENC_VHT: ri.flags |= RATE_INFO_FLAGS_VHT_MCS; ri.mcs = status->rate_idx; - ri.nss = status->vht_nss; - if (status->flag & RX_FLAG_40MHZ) - ri.bw = RATE_INFO_BW_40; - else if (status->vht_flag & RX_VHT_FLAG_80MHZ) - ri.bw = RATE_INFO_BW_80; - else if (status->vht_flag & RX_VHT_FLAG_160MHZ) - ri.bw = RATE_INFO_BW_160; - else - ri.bw = RATE_INFO_BW_20; - if (status->flag & RX_FLAG_SHORT_GI) + ri.nss = status->nss; + ri.bw = status->bw; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else { + break; + default: + WARN_ON(1); + /* fall through */ + case RX_ENC_LEGACY: { struct ieee80211_supported_band *sband; int shift = 0; int bitrate; - if (status->flag & RX_FLAG_10MHZ) { + ri.bw = status->bw; + + switch (status->bw) { + case RATE_INFO_BW_10: shift = 1; - ri.bw = RATE_INFO_BW_10; - } else if (status->flag & RX_FLAG_5MHZ) { + break; + case RATE_INFO_BW_5: shift = 2; - ri.bw = RATE_INFO_BW_5; - } else { - ri.bw = RATE_INFO_BW_20; + break; } sband = local->hw.wiphy->bands[status->band]; @@ -2762,19 +2768,21 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, if (status->band == NL80211_BAND_5GHZ) { ts += 20 << shift; mpdu_offset += 2; - } else if (status->flag & RX_FLAG_SHORTPRE) { + } else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) { ts += 96; } else { ts += 192; } } + break; + } } rate = cfg80211_calculate_bitrate(&ri); if (WARN_ONCE(!rate, "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n", (unsigned long long)status->flag, status->rate_idx, - status->vht_nss)) + status->nss)) return 0; /* rewind from end of MPDU */ @@ -2791,8 +2799,10 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef; + /* for interface list, to avoid linking iflist_mtx and chanctx_mtx */ + ASSERT_RTNL(); + mutex_lock(&local->mtx); - mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { /* it might be waiting for the local->mtx, but then * by the time it gets it, sdata->wdev.cac_started @@ -2809,7 +2819,6 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) GFP_KERNEL); } } - mutex_unlock(&local->iflist_mtx); mutex_unlock(&local->mtx); } @@ -2831,7 +2840,9 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) } mutex_unlock(&local->chanctx_mtx); + rtnl_lock(); ieee80211_dfs_cac_cancel(local); + rtnl_unlock(); if (num_chanctx > 1) /* XXX: multi-channel is not supported yet */ @@ -2846,7 +2857,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw) trace_api_radar_detected(local); - 
ieee80211_queue_work(hw, &local->radar_detected_work); + schedule_work(&local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected); diff --git a/net/wireless/core.c b/net/wireless/core.c index b0d6761f0cdd..83ea164f16b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -305,30 +305,14 @@ static void cfg80211_event_work(struct work_struct *work) void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) { - struct cfg80211_iface_destroy *item; + struct wireless_dev *wdev, *tmp; ASSERT_RTNL(); - spin_lock_irq(&rdev->destroy_list_lock); - while ((item = list_first_entry_or_null(&rdev->destroy_list, - struct cfg80211_iface_destroy, - list))) { - struct wireless_dev *wdev, *tmp; - u32 nlportid = item->nlportid; - - list_del(&item->list); - kfree(item); - spin_unlock_irq(&rdev->destroy_list_lock); - - list_for_each_entry_safe(wdev, tmp, - &rdev->wiphy.wdev_list, list) { - if (nlportid == wdev->owner_nlportid) - rdev_del_virtual_intf(rdev, wdev); - } - - spin_lock_irq(&rdev->destroy_list_lock); + list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) { + if (wdev->nl_owner_dead) + rdev_del_virtual_intf(rdev, wdev); } - spin_unlock_irq(&rdev->destroy_list_lock); } static void cfg80211_destroy_iface_wk(struct work_struct *work) @@ -346,14 +330,16 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work) static void cfg80211_sched_scan_stop_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; + struct cfg80211_sched_scan_request *req, *tmp; rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); rtnl_lock(); - - __cfg80211_stop_sched_scan(rdev, false); - + list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { + if (req->nl_owner_dead) + cfg80211_stop_sched_scan_req(rdev, req, false); + } rtnl_unlock(); } @@ -468,8 +454,8 @@ use_default_name: spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); + INIT_LIST_HEAD(&rdev->sched_scan_req_list); INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); - INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); INIT_LIST_HEAD(&rdev->mlme_unreg); spin_lock_init(&rdev->mlme_unreg_lock); INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk); @@ -484,10 +470,9 @@ use_default_name: rdev->wiphy.dev.platform_data = rdev; device_enable_async_suspend(&rdev->wiphy.dev); - INIT_LIST_HEAD(&rdev->destroy_list); - spin_lock_init(&rdev->destroy_list_lock); INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk); INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); + INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk); INIT_WORK(&rdev->propagate_radar_detect_wk, cfg80211_propagate_radar_detect_wk); INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); @@ -1046,7 +1031,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; - struct cfg80211_sched_scan_request *sched_scan_req; + struct cfg80211_sched_scan_request *pos, *tmp; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); @@ -1057,9 +1042,11 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - if (sched_scan_req && dev == sched_scan_req->dev) - __cfg80211_stop_sched_scan(rdev, false); + list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, + list) { + if (dev == 
pos->dev) + cfg80211_stop_sched_scan_req(rdev, pos, false); + } #ifdef CONFIG_CFG80211_WEXT kfree(wdev->wext.ie); @@ -1134,7 +1121,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; - struct cfg80211_sched_scan_request *sched_scan_req; + struct cfg80211_sched_scan_request *pos, *tmp; if (!wdev) return NOTIFY_DONE; @@ -1211,10 +1198,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, ___cfg80211_scan_done(rdev, false); } - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - if (WARN_ON(sched_scan_req && - sched_scan_req->dev == wdev->netdev)) { - __cfg80211_stop_sched_scan(rdev, false); + list_for_each_entry_safe(pos, tmp, + &rdev->sched_scan_req_list, list) { + if (WARN_ON(pos && pos->dev == wdev->netdev)) + cfg80211_stop_sched_scan_req(rdev, pos, false); } rdev->opencount--; diff --git a/net/wireless/core.h b/net/wireless/core.h index 5d27eca57d3b..6e809325af3b 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -74,10 +74,9 @@ struct cfg80211_registered_device { u32 bss_entries; struct cfg80211_scan_request *scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; - struct cfg80211_sched_scan_request __rcu *sched_scan_req; + struct list_head sched_scan_req_list; unsigned long suspend_at; struct work_struct scan_done_wk; - struct work_struct sched_scan_results_wk; struct genl_info *cur_cmd_info; @@ -91,11 +90,9 @@ struct cfg80211_registered_device { struct cfg80211_coalesce *coalesce; - spinlock_t destroy_list_lock; - struct list_head destroy_list; struct work_struct destroy_work; - struct work_struct sched_scan_stop_wk; + struct work_struct sched_scan_res_wk; struct cfg80211_chan_def radar_chandef; struct work_struct propagate_radar_detect_wk; @@ -227,13 +224,7 @@ struct cfg80211_event { union { struct cfg80211_connect_resp_params cr; - struct { - const u8 *req_ie; - const u8 *resp_ie; - size_t req_ie_len; - size_t resp_ie_len; - struct cfg80211_bss *bss; - } rm; + struct cfg80211_roam_info rm; struct { const u8 *ie; size_t ie_len; @@ -264,11 +255,6 @@ struct cfg80211_beacon_registration { u32 nlportid; }; -struct cfg80211_iface_destroy { - struct list_head list; - u32 nlportid; -}; - struct cfg80211_cqm_config { u32 rssi_hyst; s32 last_rssi_event_value; @@ -398,9 +384,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len); + struct cfg80211_roam_info *info); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_autodisconnect_wk(struct work_struct *work); @@ -424,9 +408,16 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, void __cfg80211_scan_done(struct work_struct *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message); -void __cfg80211_sched_scan_results(struct work_struct *wk); +void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req); +int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, + bool want_multi); +void cfg80211_sched_scan_results_wk(struct work_struct *work); +int cfg80211_stop_sched_scan_req(struct cfg80211_registered_device *rdev, + 
struct cfg80211_sched_scan_request *req, + bool driver_initiated); int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, - bool driver_initiated); + u64 reqid, bool driver_initiated); void cfg80211_upload_connect_keys(struct wireless_dev *wdev); int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 50c35affccad..570fc95dc507 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -419,6 +419,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, + [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -496,6 +497,7 @@ static const struct nla_policy nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, + [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN }, [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, }; @@ -1376,7 +1378,7 @@ static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev, CMD(tdls_mgmt, TDLS_MGMT); CMD(tdls_oper, TDLS_OPER); } - if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) + if (rdev->wiphy.max_sched_scan_reqs) CMD(sched_scan_start, START_SCHED_SCAN); CMD(probe_client, PROBE_CLIENT); CMD(set_noack_map, SET_NOACK_MAP); @@ -1815,6 +1817,11 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; + if (rdev->wiphy.max_sched_scan_reqs && + nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_MAX_REQS, + rdev->wiphy.max_sched_scan_reqs)) + goto nla_put_failure; + if (nla_put(msg, NL80211_ATTR_EXT_FEATURES, sizeof(rdev->wiphy.ext_features), rdev->wiphy.ext_features)) @@ -7030,8 +7037,15 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, NULL); if (err) return ERR_PTR(err); + + /* SSID and BSSID are mutually exclusive */ + if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] && + tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) + return ERR_PTR(-EINVAL); + /* add other standalone attributes here */ - if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) { + if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] || + tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) { n_match_sets++; continue; } @@ -7202,7 +7216,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { - struct nlattr *ssid, *rssi; + struct nlattr *ssid, *bssid, *rssi; err = nla_parse_nested(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, @@ -7211,7 +7225,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, if (err) goto out_free; ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]; - if (ssid) { + bssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]; + if (ssid || bssid) { if (WARN_ON(i >= n_match_sets)) { /* this indicates a programming error, * the loop above should have verified @@ -7221,14 +7236,25 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, goto out_free; } - if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { - err = -EINVAL; - goto out_free; + if (ssid) { + if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { + err = -EINVAL; + goto out_free; + } + memcpy(request->match_sets[i].ssid.ssid, + nla_data(ssid), nla_len(ssid)); + 
request->match_sets[i].ssid.ssid_len = + nla_len(ssid); + } + if (bssid) { + if (nla_len(bssid) != ETH_ALEN) { + err = -EINVAL; + goto out_free; + } + memcpy(request->match_sets[i].bssid, + nla_data(bssid), ETH_ALEN); } - memcpy(request->match_sets[i].ssid.ssid, - nla_data(ssid), nla_len(ssid)); - request->match_sets[i].ssid.ssid_len = - nla_len(ssid); + /* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; @@ -7336,14 +7362,16 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_sched_scan_request *sched_scan_req; + bool want_multi; int err; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || - !rdev->ops->sched_scan_start) + if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_start) return -EOPNOTSUPP; - if (rdev->sched_scan_req) - return -EINPROGRESS; + want_multi = info->attrs[NL80211_ATTR_SCHED_SCAN_MULTI]; + err = cfg80211_sched_scan_req_possible(rdev, want_multi); + if (err) + return err; sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, info->attrs, @@ -7353,6 +7381,14 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (err) goto out_err; + /* leave request id zero for legacy request + * or if driver does not support multi-scheduled scan + */ + if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) { + while (!sched_scan_req->reqid) + sched_scan_req->reqid = rdev->wiphy.cookie_counter++; + } + err = rdev_sched_scan_start(rdev, dev, sched_scan_req); if (err) goto out_free; @@ -7363,7 +7399,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) sched_scan_req->owner_nlportid = info->snd_portid; - rcu_assign_pointer(rdev->sched_scan_req, sched_scan_req); + cfg80211_add_sched_scan_req(rdev, sched_scan_req); nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_START_SCHED_SCAN); return 0; @@ -7377,13 +7413,27 @@ out_err: static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { + struct cfg80211_sched_scan_request *req; struct cfg80211_registered_device *rdev = info->user_ptr[0]; + u64 cookie; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || - !rdev->ops->sched_scan_stop) + if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; - return __cfg80211_stop_sched_scan(rdev, false); + if (info->attrs[NL80211_ATTR_COOKIE]) { + cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); + return __cfg80211_stop_sched_scan(rdev, cookie, false); + } + + req = list_first_or_null_rcu(&rdev->sched_scan_req_list, + struct cfg80211_sched_scan_request, + list); + if (!req || req->reqid || + (req->owner_nlportid && + req->owner_nlportid != info->snd_portid)) + return -ENOENT; + + return cfg80211_stop_sched_scan_req(rdev, req, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, @@ -13596,14 +13646,14 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, } void nl80211_send_roamed(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) + struct net_device *netdev, + struct cfg80211_roam_info *info, gfp_t gfp) { struct sk_buff *msg; void *hdr; + const u8 *bssid = info->bss ? 
info->bss->bssid : info->bssid; - msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp); + msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len, gfp); if (!msg) return; @@ -13616,10 +13666,12 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || - (req_ie && - nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || - (resp_ie && - nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + (info->req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len, + info->req_ie)) || + (info->resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len, + info->resp_ie))) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -14883,26 +14935,26 @@ static int nl80211_netlink_notify(struct notifier_block * nb, rcu_read_lock(); list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { - bool schedule_destroy_work = false; - struct cfg80211_sched_scan_request *sched_scan_req = - rcu_dereference(rdev->sched_scan_req); - - if (sched_scan_req && notify->portid && - sched_scan_req->owner_nlportid == notify->portid) { - sched_scan_req->owner_nlportid = 0; + struct cfg80211_sched_scan_request *sched_scan_req; - if (rdev->ops->sched_scan_stop && - rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) + list_for_each_entry_rcu(sched_scan_req, + &rdev->sched_scan_req_list, + list) { + if (sched_scan_req->owner_nlportid == notify->portid) { + sched_scan_req->nl_owner_dead = true; schedule_work(&rdev->sched_scan_stop_wk); + } } list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { cfg80211_mlme_unregister_socket(wdev, notify->portid); - if (wdev->owner_nlportid == notify->portid) - schedule_destroy_work = true; - else if (wdev->conn_owner_nlportid == notify->portid) + if (wdev->owner_nlportid == notify->portid) { + wdev->nl_owner_dead = true; + schedule_work(&rdev->destroy_work); + } else if (wdev->conn_owner_nlportid == notify->portid) { schedule_work(&wdev->disconnect_wk); + } } spin_lock_bh(&rdev->beacon_registrations_lock); @@ -14915,19 +14967,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb, } } spin_unlock_bh(&rdev->beacon_registrations_lock); - - if (schedule_destroy_work) { - struct cfg80211_iface_destroy *destroy; - - destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC); - if (destroy) { - destroy->nlportid = notify->portid; - spin_lock(&rdev->destroy_list_lock); - list_add(&destroy->list, &rdev->destroy_list); - spin_unlock(&rdev->destroy_list_lock); - schedule_work(&rdev->destroy_work); - } - } } rcu_read_unlock(); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index d5f6860e62ab..b96933322077 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -56,9 +56,8 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct cfg80211_connect_resp_params *params, gfp_t gfp); void nl80211_send_roamed(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); + struct net_device *netdev, + struct cfg80211_roam_info *info, gfp_t gfp); void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap); diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index e4a99989dd06..0598c1e5d0ad 100644 --- a/net/wireless/rdev-ops.h +++ 
b/net/wireless/rdev-ops.h @@ -813,18 +813,18 @@ rdev_sched_scan_start(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *request) { int ret; - trace_rdev_sched_scan_start(&rdev->wiphy, dev, request); + trace_rdev_sched_scan_start(&rdev->wiphy, dev, request->reqid); ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, - struct net_device *dev) + struct net_device *dev, u64 reqid) { int ret; - trace_rdev_sched_scan_stop(&rdev->wiphy, dev); - ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); + trace_rdev_sched_scan_stop(&rdev->wiphy, dev, reqid); + ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev, reqid); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index a38f315819cd..5fae296a6a58 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -3244,9 +3244,6 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy, if (WARN_ON(!cfg80211_chandef_valid(chandef))) return; - if (WARN_ON(!(chandef->chan->flags & IEEE80211_CHAN_RADAR))) - return; - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (wiphy == &rdev->wiphy) continue; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 6f4996c0f4df..14d5f0c8c45f 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -300,92 +300,168 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, } EXPORT_SYMBOL(cfg80211_scan_done); -void __cfg80211_sched_scan_results(struct work_struct *wk) +void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req) { - struct cfg80211_registered_device *rdev; - struct cfg80211_sched_scan_request *request; + ASSERT_RTNL(); - rdev = container_of(wk, struct cfg80211_registered_device, - sched_scan_results_wk); + list_add_rcu(&req->list, &rdev->sched_scan_req_list); +} - rtnl_lock(); +static void cfg80211_del_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req) +{ + ASSERT_RTNL(); - request = rtnl_dereference(rdev->sched_scan_req); + list_del_rcu(&req->list); + kfree_rcu(req, rcu_head); +} - /* we don't have sched_scan_req anymore if the scan is stopping */ - if (request) { - if (request->flags & NL80211_SCAN_FLAG_FLUSH) { - /* flush entries from previous scans */ - spin_lock_bh(&rdev->bss_lock); - __cfg80211_bss_expire(rdev, request->scan_start); - spin_unlock_bh(&rdev->bss_lock); - request->scan_start = jiffies; - } - nl80211_send_sched_scan(request, NL80211_CMD_SCHED_SCAN_RESULTS); +static struct cfg80211_sched_scan_request * +cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid) +{ + struct cfg80211_sched_scan_request *pos; + + ASSERT_RTNL(); + + list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { + if (pos->reqid == reqid) + return pos; } + return NULL; +} + +/* + * Determines if a scheduled scan request can be handled. When a legacy + * scheduled scan is running no other scheduled scan is allowed regardless + * whether the request is for legacy or multi-support scan. When a multi-support + * scheduled scan is running a request for legacy scan is not allowed. In this + * case a request for multi-support scan can be handled if resources are + * available, ie. struct wiphy::max_sched_scan_reqs limit is not yet reached. 
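With sched_scan_start/stop now carrying a 64-bit request id (zero is reserved for the single legacy request), a driver that advertises wiphy->max_sched_scan_reqs > 1 is expected to remember the reqid it was given and hand it back when reporting. A sketch under those assumptions; drv_priv, sscan_reqid and fw_start_pno() are hypothetical driver details:

        static int drv_sched_scan_start(struct wiphy *wiphy, struct net_device *dev,
                                        struct cfg80211_sched_scan_request *req)
        {
                struct drv_priv *priv = wiphy_priv(wiphy);

                priv->sscan_reqid = req->reqid;         /* 0 for a legacy request */
                return fw_start_pno(priv, req);
        }

        /* later, when the firmware delivers results for that request */
        static void drv_pno_results(struct drv_priv *priv)
        {
                cfg80211_sched_scan_results(priv->wiphy, priv->sscan_reqid);
        }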
+ */ +int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, + bool want_multi) +{ + struct cfg80211_sched_scan_request *pos; + int i = 0; + + list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { + /* request id zero means legacy in progress */ + if (!i && !pos->reqid) + return -EINPROGRESS; + i++; + } + + if (i) { + /* no legacy allowed when multi request(s) are active */ + if (!want_multi) + return -EINPROGRESS; + + /* resource limit reached */ + if (i == rdev->wiphy.max_sched_scan_reqs) + return -ENOSPC; + } + return 0; +} + +void cfg80211_sched_scan_results_wk(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + struct cfg80211_sched_scan_request *req, *tmp; + rdev = container_of(work, struct cfg80211_registered_device, + sched_scan_res_wk); + + rtnl_lock(); + list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { + if (req->report_results) { + req->report_results = false; + if (req->flags & NL80211_SCAN_FLAG_FLUSH) { + /* flush entries from previous scans */ + spin_lock_bh(&rdev->bss_lock); + __cfg80211_bss_expire(rdev, req->scan_start); + spin_unlock_bh(&rdev->bss_lock); + req->scan_start = jiffies; + } + nl80211_send_sched_scan(req, + NL80211_CMD_SCHED_SCAN_RESULTS); + } + } rtnl_unlock(); } -void cfg80211_sched_scan_results(struct wiphy *wiphy) +void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid) { - trace_cfg80211_sched_scan_results(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct cfg80211_sched_scan_request *request; + + trace_cfg80211_sched_scan_results(wiphy, reqid); /* ignore if we're not scanning */ - if (rcu_access_pointer(wiphy_to_rdev(wiphy)->sched_scan_req)) - queue_work(cfg80211_wq, - &wiphy_to_rdev(wiphy)->sched_scan_results_wk); + rtnl_lock(); + request = cfg80211_find_sched_scan_req(rdev, reqid); + if (request) { + request->report_results = true; + queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); + } + rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_results); -void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy) +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_RTNL(); - trace_cfg80211_sched_scan_stopped(wiphy); + trace_cfg80211_sched_scan_stopped(wiphy, reqid); - __cfg80211_stop_sched_scan(rdev, true); + __cfg80211_stop_sched_scan(rdev, reqid, true); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl); -void cfg80211_sched_scan_stopped(struct wiphy *wiphy) +void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid) { rtnl_lock(); - cfg80211_sched_scan_stopped_rtnl(wiphy); + cfg80211_sched_scan_stopped_rtnl(wiphy, reqid); rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped); -int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, - bool driver_initiated) +int cfg80211_stop_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req, + bool driver_initiated) { - struct cfg80211_sched_scan_request *sched_scan_req; - struct net_device *dev; - ASSERT_RTNL(); - if (!rdev->sched_scan_req) - return -ENOENT; - - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - dev = sched_scan_req->dev; - if (!driver_initiated) { - int err = rdev_sched_scan_stop(rdev, dev); + int err = rdev_sched_scan_stop(rdev, req->dev, req->reqid); if (err) return err; } - nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_SCHED_SCAN_STOPPED); + nl80211_send_sched_scan(req, NL80211_CMD_SCHED_SCAN_STOPPED); - 
RCU_INIT_POINTER(rdev->sched_scan_req, NULL); - kfree_rcu(sched_scan_req, rcu_head); + cfg80211_del_sched_scan_req(rdev, req); return 0; } +int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, + u64 reqid, bool driver_initiated) +{ + struct cfg80211_sched_scan_request *sched_scan_req; + + ASSERT_RTNL(); + + sched_scan_req = cfg80211_find_sched_scan_req(rdev, reqid); + if (!sched_scan_req) + return -ENOENT; + + return cfg80211_stop_sched_scan_req(rdev, sched_scan_req, + driver_initiated); +} + void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs) { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 6459bb7c21f7..532a0007ce82 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -5,6 +5,7 @@ * * Copyright 2009 Johannes Berg <[email protected]> * Copyright (C) 2009 Intel Corporation. All rights reserved. + * Copyright 2017 Intel Deutschland GmbH */ #include <linux/etherdevice.h> @@ -870,9 +871,7 @@ EXPORT_SYMBOL(cfg80211_connect_done); /* Consumes bss object one way or another */ void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len) + struct cfg80211_roam_info *info) { #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; @@ -890,97 +889,84 @@ void __cfg80211_roamed(struct wireless_dev *wdev, cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; - cfg80211_hold_bss(bss_from_pub(bss)); - wdev->current_bss = bss_from_pub(bss); + if (WARN_ON(!info->bss)) + return; + + cfg80211_hold_bss(bss_from_pub(info->bss)); + wdev->current_bss = bss_from_pub(info->bss); nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy), - wdev->netdev, bss->bssid, - req_ie, req_ie_len, resp_ie, resp_ie_len, - GFP_KERNEL); + wdev->netdev, info, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT - if (req_ie) { + if (info->req_ie) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = req_ie_len; + wrqu.data.length = info->req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, - &wrqu, req_ie); + &wrqu, info->req_ie); } - if (resp_ie) { + if (info->resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = resp_ie_len; + wrqu.data.length = info->resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, - &wrqu, resp_ie); + &wrqu, info->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); - memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN); + memcpy(wrqu.ap_addr.sa_data, info->bss->bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, info->bss->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); #endif return; out: - cfg80211_put_bss(wdev->wiphy, bss); -} - -void cfg80211_roamed(struct net_device *dev, - struct ieee80211_channel *channel, - const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_bss *bss; - - bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, - wdev->ssid_len, - wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); - if (WARN_ON(!bss)) - return; - - cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie, - resp_ie_len, gfp); + cfg80211_put_bss(wdev->wiphy, info->bss); } -EXPORT_SYMBOL(cfg80211_roamed); -/* Consumes bss object one way or another */ -void cfg80211_roamed_bss(struct net_device *dev, - struct cfg80211_bss *bss, const u8 *req_ie, - 
size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, gfp_t gfp) +/* Consumes info->bss object one way or another */ +void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, + gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; - if (WARN_ON(!bss)) + if (!info->bss) { + info->bss = cfg80211_get_bss(wdev->wiphy, info->channel, + info->bssid, wdev->ssid, + wdev->ssid_len, + wdev->conn_bss_type, + IEEE80211_PRIVACY_ANY); + } + + if (WARN_ON(!info->bss)) return; - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); + ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len, gfp); if (!ev) { - cfg80211_put_bss(wdev->wiphy, bss); + cfg80211_put_bss(wdev->wiphy, info->bss); return; } ev->type = EVENT_ROAMED; ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); - ev->rm.req_ie_len = req_ie_len; - memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); - ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; - ev->rm.resp_ie_len = resp_ie_len; - memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); - ev->rm.bss = bss; + ev->rm.req_ie_len = info->req_ie_len; + memcpy((void *)ev->rm.req_ie, info->req_ie, info->req_ie_len); + ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + info->req_ie_len; + ev->rm.resp_ie_len = info->resp_ie_len; + memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len); + ev->rm.bss = info->bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } -EXPORT_SYMBOL(cfg80211_roamed_bss); +EXPORT_SYMBOL(cfg80211_roamed); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index fd55786f0462..ca8b2059f92c 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -576,11 +576,6 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap, TP_ARGS(wiphy, netdev) ); -DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), - TP_ARGS(wiphy, netdev) -); - DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) @@ -1610,20 +1605,31 @@ DEFINE_EVENT(tx_rx_evt, rdev_set_antenna, TP_ARGS(wiphy, rx, tx) ); -TRACE_EVENT(rdev_sched_scan_start, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_sched_scan_request *request), - TP_ARGS(wiphy, netdev, request), +DECLARE_EVENT_CLASS(wiphy_netdev_id_evt, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY + __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; + __entry->id = id; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, - WIPHY_PR_ARG, NETDEV_PR_ARG) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", id: %llu", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->id) +); + +DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_start, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id) +); + +DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_stop, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id) ); TRACE_EVENT(rdev_tdls_mgmt, @@ -2814,14 +2820,28 @@ TRACE_EVENT(cfg80211_scan_done, MAC_PR_ARG(tsf_bssid)) ); -DEFINE_EVENT(wiphy_only_evt, 
cfg80211_sched_scan_results, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy) +DECLARE_EVENT_CLASS(wiphy_id_evt, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(u64, id) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->id = id; + ), + TP_printk(WIPHY_PR_FMT ", id: %llu", WIPHY_PR_ARG, __entry->id) ); -DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy) +DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_stopped, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id) +); + +DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_results, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id) ); TRACE_EVENT(cfg80211_get_bss, diff --git a/net/wireless/util.c b/net/wireless/util.c index a46bc42d0910..7198373e2920 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -946,9 +946,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) ev->cr.status == WLAN_STATUS_SUCCESS); break; case EVENT_ROAMED: - __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, - ev->rm.req_ie_len, ev->rm.resp_ie, - ev->rm.resp_ie_len); + __cfg80211_roamed(wdev, &ev->rm); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 0d449d8032d1..d4433a47e6c3 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -563,7 +563,7 @@ struct ksym *ksym_search(long key) return &syms[0]; } -int set_link_xdp_fd(int ifindex, int fd) +int set_link_xdp_fd(int ifindex, int fd, int flags) { struct sockaddr_nl sa; int sock, seq = 0, len, ret = -1; @@ -599,15 +599,28 @@ int set_link_xdp_fd(int ifindex, int fd) req.nh.nlmsg_seq = ++seq; req.ifinfo.ifi_family = AF_UNSPEC; req.ifinfo.ifi_index = ifindex; + + /* started nested attribute for XDP */ nla = (struct nlattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len)); nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/; + nla->nla_len = NLA_HDRLEN; - nla_xdp = (struct nlattr *)((char *)nla + NLA_HDRLEN); + /* add XDP fd */ + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); nla_xdp->nla_type = 1/*IFLA_XDP_FD*/; nla_xdp->nla_len = NLA_HDRLEN + sizeof(int); memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd)); - nla->nla_len = NLA_HDRLEN + nla_xdp->nla_len; + nla->nla_len += nla_xdp->nla_len; + + /* if user passed in any flags, add those too */ + if (flags) { + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); + nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/; + nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags); + memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags)); + nla->nla_len += nla_xdp->nla_len; + } req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h index 68f6b2d22507..6bfd75ec6a16 100644 --- a/samples/bpf/bpf_load.h +++ b/samples/bpf/bpf_load.h @@ -47,5 +47,5 @@ struct ksym { int load_kallsyms(void); struct ksym *ksym_search(long key); -int set_link_xdp_fd(int ifindex, int fd); +int set_link_xdp_fd(int ifindex, int fd, int flags); #endif diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index d2be65d1fd86..deb05e630d84 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -5,6 +5,7 @@ * License as published by the Free Software Foundation. 
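The bpf_load.c change above builds the request by appending children to the nested IFLA_XDP attribute and accumulating their sizes into nla->nla_len, instead of assuming a single child. The resulting layout, roughly (attribute numbers as hard-coded in the code above, lengths before alignment):

        /*
         *  IFLA_XDP (NLA_F_NESTED | 43)           nla_len = NLA_HDRLEN + children
         *      IFLA_XDP_FD    (1), int prog fd    nla_len = NLA_HDRLEN + 4
         *      IFLA_XDP_FLAGS (3), int, optional  nla_len = NLA_HDRLEN + 4
         *
         * each child is written at (char *)nla + nla->nla_len before that child's
         * size is added, and the final nla->nla_len is NLA_ALIGN()ed into
         * req.nh.nlmsg_len.
         */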
*/ #include <linux/bpf.h> +#include <linux/if_link.h> #include <assert.h> #include <errno.h> #include <signal.h> @@ -12,16 +13,18 @@ #include <stdlib.h> #include <string.h> #include <unistd.h> +#include <libgen.h> #include "bpf_load.h" #include "bpf_util.h" #include "libbpf.h" static int ifindex; +static int flags; static void int_exit(int sig) { - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, flags); exit(0); } @@ -54,18 +57,39 @@ static void poll_stats(int interval) } } -int main(int ac, char **argv) +static void usage(const char *prog) { - char filename[256]; + fprintf(stderr, + "usage: %s [OPTS] IFINDEX\n\n" + "OPTS:\n" + " -S use skb-mode\n", + prog); +} - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); +int main(int argc, char **argv) +{ + const char *optstr = "S"; + char filename[256]; + int opt; + + while ((opt = getopt(argc, argv, optstr)) != -1) { + switch (opt) { + case 'S': + flags |= XDP_FLAGS_SKB_MODE; + break; + default: + usage(basename(argv[0])); + return 1; + } + } - if (ac != 2) { - printf("usage: %s IFINDEX\n", argv[0]); + if (optind == argc) { + usage(basename(argv[0])); return 1; } + ifindex = strtoul(argv[optind], NULL, 0); - ifindex = strtoul(argv[1], NULL, 0); + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); @@ -79,7 +103,7 @@ int main(int ac, char **argv) signal(SIGINT, int_exit); - if (set_link_xdp_fd(ifindex, prog_fd[0]) < 0) { + if (set_link_xdp_fd(ifindex, prog_fd[0], flags) < 0) { printf("link set xdp fd failed\n"); return 1; } diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c index 70e192fc61aa..cb2bda7b5346 100644 --- a/samples/bpf/xdp_tx_iptunnel_user.c +++ b/samples/bpf/xdp_tx_iptunnel_user.c @@ -5,6 +5,7 @@ * License as published by the Free Software Foundation. 
*/ #include <linux/bpf.h> +#include <linux/if_link.h> #include <assert.h> #include <errno.h> #include <signal.h> @@ -28,7 +29,7 @@ static int ifindex = -1; static void int_exit(int sig) { if (ifindex > -1) - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, 0); exit(0); } @@ -136,12 +137,13 @@ int main(int argc, char **argv) { unsigned char opt_flags[256] = {}; unsigned int kill_after_s = 0; - const char *optstr = "i:a:p:s:d:m:T:P:h"; + const char *optstr = "i:a:p:s:d:m:T:P:Sh"; int min_port = 0, max_port = 0; struct iptnl_info tnl = {}; struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; struct vip vip = {}; char filename[256]; + int flags = 0; int opt; int i; @@ -201,6 +203,9 @@ int main(int argc, char **argv) case 'T': kill_after_s = atoi(optarg); break; + case 'S': + flags |= XDP_FLAGS_SKB_MODE; + break; default: usage(argv[0]); return 1; @@ -243,14 +248,14 @@ int main(int argc, char **argv) } } - if (set_link_xdp_fd(ifindex, prog_fd[0]) < 0) { + if (set_link_xdp_fd(ifindex, prog_fd[0], flags) < 0) { printf("link set xdp fd failed\n"); return 1; } poll_stats(kill_after_s); - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, flags); return 0; } diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 84a5d1823f02..369e7d7bba80 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -6,6 +6,25 @@ #include <string.h> #include <errno.h> +#include <asm/byteorder.h> + +#if __BYTE_ORDER == __LITTLE_ENDIAN +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +#elif __BYTE_ORDER == __BIG_ENDIAN +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +#else +# error "Fix your __BYTE_ORDER?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? \ + __constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? 
\ + __constant_ntohs(x) : __bpf_ntohs(x)) + static inline unsigned int bpf_num_possible_cpus(void) { static const char *fcpu = "/sys/devices/system/cpu/possible"; @@ -35,4 +54,11 @@ static inline unsigned int bpf_num_possible_cpus(void) return possible_cpus; } +#define __bpf_percpu_val_align __attribute__((__aligned__(8))) + +#define BPF_DECLARE_PERCPU(type, name) \ + struct { type v; /* padding */ } __bpf_percpu_val_align \ + name[bpf_num_possible_cpus()] +#define bpf_percpu(name, cpu) name[(cpu)].v + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/test_l4lb.c index 368bfe8b9842..b68b21274bac 100644 --- a/tools/testing/selftests/bpf/test_l4lb.c +++ b/tools/testing/selftests/bpf/test_l4lb.c @@ -19,9 +19,8 @@ #include <linux/udp.h> #include "bpf_helpers.h" #include "test_iptunnel_common.h" +#include "bpf_util.h" -#define htons __builtin_bswap16 -#define ntohs __builtin_bswap16 int _version SEC("version") = 1; static inline __u32 rol32(__u32 word, unsigned int shift) @@ -355,7 +354,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end, iph_len = sizeof(struct ipv6hdr); protocol = ip6h->nexthdr; pckt.proto = protocol; - pkt_bytes = ntohs(ip6h->payload_len); + pkt_bytes = bpf_ntohs(ip6h->payload_len); off += iph_len; if (protocol == IPPROTO_FRAGMENT) { return TC_ACT_SHOT; @@ -377,7 +376,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end, protocol = iph->protocol; pckt.proto = protocol; - pkt_bytes = ntohs(iph->tot_len); + pkt_bytes = bpf_ntohs(iph->tot_len); off += IPV4_HDR_LEN_NO_OPT; if (iph->frag_off & PCKT_FRAGMENTED) @@ -464,9 +463,9 @@ int balancer_ingress(struct __sk_buff *ctx) if (data + nh_off > data_end) return TC_ACT_SHOT; eth_proto = eth->eth_proto; - if (eth_proto == htons(ETH_P_IP)) + if (eth_proto == bpf_htons(ETH_P_IP)) return process_packet(data, nh_off, data_end, false, ctx); - else if (eth_proto == htons(ETH_P_IPV6)) + else if (eth_proto == bpf_htons(ETH_P_IPV6)) return process_packet(data, nh_off, data_end, true, ctx); else return TC_ACT_SHOT; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index a977c4f7b0ce..93314524de0d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -137,20 +137,20 @@ static void test_hashmap_sizes(int task, void *data) static void test_hashmap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); - long long value[nr_cpus]; + BPF_DECLARE_PERCPU(long, value); long long key, next_key, first_key; int expected_key_mask = 0; int fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), - sizeof(value[0]), 2, map_flags); + sizeof(bpf_percpu(value, 0)), 2, map_flags); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - value[i] = i + 100; + bpf_percpu(value, i) = i + 100; key = 1; /* Insert key=1 element. */ @@ -170,8 +170,9 @@ static void test_hashmap_percpu(int task, void *data) /* Check that key=1 can be found. Value could be 0 if the lookup * was run from a different CPU. */ - value[0] = 1; - assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100); + bpf_percpu(value, 0) = 1; + assert(bpf_map_lookup_elem(fd, &key, value) == 0 && + bpf_percpu(value, 0) == 100); key = 2; /* Check that key=2 is not found. 
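BPF_DECLARE_PERCPU()/bpf_percpu() above exist because the kernel copies per-CPU map values in and out as 8-byte slots per possible CPU, so a plain array like long value[nr_cpus] has the wrong stride whenever the value type is narrower than 8 bytes. A minimal user-space sketch of summing one per-CPU counter; map_fd and the key are placeholders:

        static long sum_percpu_counter(int map_fd, __u32 key)
        {
                unsigned int nr_cpus = bpf_num_possible_cpus();
                BPF_DECLARE_PERCPU(long, counts);   /* one padded 8-byte slot per CPU */
                long total = 0;
                unsigned int i;

                if (bpf_map_lookup_elem(map_fd, &key, counts))
                        return -1;

                for (i = 0; i < nr_cpus; i++)
                        total += bpf_percpu(counts, i);

                return total;
        }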
*/ @@ -211,7 +212,7 @@ static void test_hashmap_percpu(int task, void *data) assert(bpf_map_lookup_elem(fd, &next_key, value) == 0); for (i = 0; i < nr_cpus; i++) - assert(value[i] == i + 100); + assert(bpf_percpu(value, i) == i + 100); key = next_key; } @@ -296,34 +297,36 @@ static void test_arraymap(int task, void *data) static void test_arraymap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); int key, next_key, fd, i; - long long values[nr_cpus]; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), 2, 0); + sizeof(bpf_percpu(values, 0)), 2, 0); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - values[i] = i + 100; + bpf_percpu(values, i) = i + 100; key = 1; /* Insert key=1 element. */ assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); - values[0] = 0; + bpf_percpu(values, 0) = 0; assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that key=1 can be found. */ - assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100); + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && + bpf_percpu(values, 0) == 100); key = 0; /* Check that key=0 is also found and zero initialized. */ assert(bpf_map_lookup_elem(fd, &key, values) == 0 && - values[0] == 0 && values[nr_cpus - 1] == 0); + bpf_percpu(values, 0) == 0 && + bpf_percpu(values, nr_cpus - 1) == 0); /* Check that key=2 cannot be inserted due to max_entries limit. */ key = 2; @@ -353,15 +356,15 @@ static void test_arraymap_percpu(int task, void *data) static void test_arraymap_percpu_many_keys(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); /* nr_keys is not too large otherwise the test stresses percpu * allocator more than anything else */ unsigned int nr_keys = 2000; - long long values[nr_cpus]; int key, fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), nr_keys, 0); + sizeof(bpf_percpu(values, 0)), nr_keys, 0); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", strerror(errno)); @@ -369,19 +372,19 @@ static void test_arraymap_percpu_many_keys(void) } for (i = 0; i < nr_cpus; i++) - values[i] = i + 10; + bpf_percpu(values, i) = i + 10; for (key = 0; key < nr_keys; key++) assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); for (key = 0; key < nr_keys; key++) { for (i = 0; i < nr_cpus; i++) - values[i] = 0; + bpf_percpu(values, i) = 0; assert(bpf_map_lookup_elem(fd, &key, values) == 0); for (i = 0; i < nr_cpus; i++) - assert(values[i] == i + 10); + assert(bpf_percpu(values, i) == i + 10); } close(fd); diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c index fd1e0832d409..711300508ee0 100644 --- a/tools/testing/selftests/bpf/test_pkt_access.c +++ b/tools/testing/selftests/bpf/test_pkt_access.c @@ -14,8 +14,8 @@ #include <linux/tcp.h> #include <linux/pkt_cls.h> #include "bpf_helpers.h" +#include "bpf_util.h" -#define _htons __builtin_bswap16 #define barrier() __asm__ __volatile__("": : :"memory") int _version SEC("version") = 1; @@ -32,7 +32,7 @@ int process(struct __sk_buff *skb) if (eth + 1 > data_end) return TC_ACT_SHOT; - if (eth->h_proto == _htons(ETH_P_IP)) { + if (eth->h_proto == bpf_htons(ETH_P_IP)) { struct iphdr *iph = (struct iphdr *)(eth + 1); if (iph + 1 > data_end) @@ -40,7 +40,7 @@ int process(struct __sk_buff *skb) ihl_len = iph->ihl * 4; proto = 
iph->protocol; tcp = (struct tcphdr *)((void *)(iph) + ihl_len); - } else if (eth->h_proto == _htons(ETH_P_IPV6)) { + } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1); if (ip6h + 1 > data_end) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 5275d4a1df24..7c2d899c8f43 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -30,8 +30,6 @@ typedef __u16 __sum16; #include "test_iptunnel_common.h" #include "bpf_util.h" -#define _htons __builtin_bswap16 - static int error_cnt, pass_cnt; #define MAGIC_BYTES 123 @@ -42,10 +40,10 @@ static struct { struct iphdr iph; struct tcphdr tcp; } __packed pkt_v4 = { - .eth.h_proto = _htons(ETH_P_IP), + .eth.h_proto = bpf_htons(ETH_P_IP), .iph.ihl = 5, .iph.protocol = 6, - .iph.tot_len = _htons(MAGIC_BYTES), + .iph.tot_len = bpf_htons(MAGIC_BYTES), .tcp.urg_ptr = 123, }; @@ -55,9 +53,9 @@ static struct { struct ipv6hdr iph; struct tcphdr tcp; } __packed pkt_v6 = { - .eth.h_proto = _htons(ETH_P_IPV6), + .eth.h_proto = bpf_htons(ETH_P_IPV6), .iph.nexthdr = 6, - .iph.payload_len = _htons(MAGIC_BYTES), + .iph.payload_len = bpf_htons(MAGIC_BYTES), .tcp.urg_ptr = 123, }; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 95a8d5f3ab80..d3395c192a24 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -191,6 +191,86 @@ static struct bpf_test tests[] = { .result = REJECT, }, { + "test6 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test7 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test8 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "uses reserved fields", + .result = REJECT, + }, + { + "test9 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 1, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test10 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test11 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test12 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "not pointing to valid bpf_map", + .result = REJECT, + }, + { + "test13 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { "no bpf_exit", .insns = { BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), @@ -331,6 +411,30 @@ static struct bpf_test tests[] = { .result = REJECT, }, { + "invalid fp arithmetic", + /* If this gets ever changed, make sure JITs can deal 
with it. */ + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer arithmetic", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, + { + "non-invalid fp arithmetic", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { "invalid argument register", .insns = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, @@ -1801,6 +1905,20 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { + "unpriv: adding of fp", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "pointer arithmetic prohibited", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, + { "unpriv: cmp of stack pointer", .insns = { BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -2472,6 +2590,25 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "direct packet access: test16 (arith on data_end)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { "helper access to packet: test1, valid packet_ptr range", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
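The BPF_DECLARE_PERCPU()/bpf_percpu() helpers added to bpf_util.h above wrap each per-CPU value in an 8-byte-aligned struct, so a user-space buffer carries one padded slot per possible CPU, matching the stride the kernel uses when it copies per-CPU map values to and from user space. Below is a minimal stand-alone sketch of that usage pattern, modeled on the test_arraymap_percpu() changes in this diff; the <bpf/bpf.h> header for bpf_create_map() and the map helpers, and the privileges needed to create BPF maps, are assumptions of the sketch rather than part of the patch.

/* Hypothetical usage sketch, not part of the patch above. */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <linux/bpf.h>		/* BPF_MAP_TYPE_PERCPU_ARRAY, BPF_ANY */
#include <bpf/bpf.h>		/* bpf_create_map(), bpf_map_{update,lookup}_elem() */
#include "bpf_util.h"		/* bpf_num_possible_cpus(), BPF_DECLARE_PERCPU(), bpf_percpu() */

int main(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	/* Expands to one 8-byte-aligned slot per possible CPU. */
	BPF_DECLARE_PERCPU(long, values);
	int key = 0, fd;
	unsigned int i;

	/* value_size is the size of a single CPU's value, as in test_maps.c. */
	fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
			    sizeof(bpf_percpu(values, 0)), 1, 0);
	if (fd < 0) {
		printf("failed to create per-cpu array: %s\n", strerror(errno));
		return 1;
	}

	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(values, i) = i + 100;

	/* One syscall moves the whole padded per-CPU array. */
	assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
	assert(bpf_map_lookup_elem(fd, &key, values) == 0);

	for (i = 0; i < nr_cpus; i++)
		assert(bpf_percpu(values, i) == i + 100);

	return 0;
}

The plain array name is still what gets passed to the map syscall wrappers; bpf_percpu(name, cpu) only hides the padding struct when reading or writing an individual CPU's value.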