Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_adminq.h    |   1
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_common.c    |  58
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_prototype.h |   3
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_register.h  |  50
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.c      | 424
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.h      |  17
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_type.h      |   6
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h  |  42
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c |  21
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_main.c    |  76
10 files changed, 481 insertions, 217 deletions
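
The largest piece of this series is the receive-path rework in i40e_txrx.c (shown in the diff below), part of which moves the packet-split header buffers into a single coherent allocation that the new i40evf_alloc_rx_headers() carves into per-descriptor slices of rx_hdr_len rounded up to 256 bytes. The standalone sketch below illustrates only that layout arithmetic; the ring size, header length, and the plain calloc() stand-in for dma_alloc_coherent() are illustrative assumptions, not values taken from the driver.

/* Sketch of the header-buffer layout used by the new i40evf_alloc_rx_headers():
 * one contiguous allocation, carved into per-descriptor slices whose size is
 * rx_hdr_len rounded up to 256 bytes so no slice straddles the alignment
 * boundary.  calloc() stands in for dma_alloc_coherent(); the ring size and
 * header length below are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HDR_ALIGN 256u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct rx_hdr_slice {
	void *hdr_buf;   /* CPU address of this descriptor's header buffer */
	uintptr_t dma;   /* the real driver stores the device address here */
};

int main(void)
{
	unsigned int count = 512;        /* descriptors in the ring (example) */
	unsigned int rx_hdr_len = 96;    /* requested header size (example) */
	size_t buf_size = ALIGN_UP(rx_hdr_len, HDR_ALIGN);
	unsigned char *base = calloc(count, buf_size);  /* dma_alloc_coherent() stand-in */
	struct rx_hdr_slice *ring = calloc(count, sizeof(*ring));
	unsigned int i;

	if (!base || !ring)
		return 1;

	for (i = 0; i < count; i++) {
		ring[i].hdr_buf = base + (size_t)i * buf_size;
		ring[i].dma = (uintptr_t)ring[i].hdr_buf;  /* illustrative only */
	}

	printf("slice %zu bytes, total %zu bytes for %u descriptors\n",
	       buf_size, buf_size * count, count);
	free(ring);
	free(base);
	return 0;
}

In the driver the same slice math also assigns each i40e_rx_buffer its DMA address (dma + i * buf_size), which is what i40evf_alloc_rx_buffers_ps() later writes into rx_desc->read.hdr_addr.
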
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e96a80e..ef43d68f67b3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c57d4f5..f07b9ff2b823 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -85,46 +85,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -535,7 +542,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_aq_send_msg_to_pf * @hw: pointer to the 
hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 9173834825ac..58e37a44b80a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59bfea0..3cc737629bf7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* 
Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 708891571dae..f41da5d8047b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40evf.h" #include "i40e_prototype.h" @@ -288,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -368,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void 
i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -529,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -587,6 +607,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -646,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -690,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -777,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -906,13 +990,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -925,20 +1009,46 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u8 rx_ptype; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + rmb(); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); - + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) + rx_ring->rx_stats.alloc_buff_failed++; + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -953,40 +1063,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? 
+ skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1008,22 +1108,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1032,7 +1126,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1048,30 +1142,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc 
until we know the + * DD bit is set. + */ + rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1079,10 +1277,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1103,6 +1298,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1122,8 +1318,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1262,8 +1464,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -1286,8 +1496,8 @@ static void 
i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c950a038237c..1e49bb1fbac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -151,6 +159,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -223,8 +232,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -278,7 +287,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaab5cc8..eba6e4b34f70 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -175,12 +175,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1116,7 +1116,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208138f4..59f62f0e65dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97bac182c..b68b73163311 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ #include <linux/uaccess.h> - struct i40evf_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; @@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) } /** - * i40evf_get_msglevel - Set debug message level + * i40evf_set_msglevel - Set debug message level * @netdev: network interface device structure * @data: message level * @@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; adapter->msg_enable = data; } @@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201c63c1..812b1200f35c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.0" +#define DRV_VERSION "1.2.25" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -244,6 +244,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << (i - 1))) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); } } @@ -263,6 +264,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } @@ -270,6 +272,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } @@ -524,7 +527,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &i40evf_msix_aq, 0, adapter->misc_vector_name, netdev); @@ -761,13 +765,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; + int count = 50; if (!macaddr) return NULL; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) + return NULL; + } f = i40evf_find_filter(adapter, macaddr); if (!f) { @@ -828,6 +836,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -838,8 +847,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { bool found = false; @@ -920,7 +935,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); + i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -959,6 +974,7 @@ void i40evf_down(struct i40evf_adapter *adapter) usleep_range(500, 1000); i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -985,8 +1001,6 @@ void 
i40evf_down(struct i40evf_adapter *adapter) netif_tx_stop_all_queues(netdev); - i40evf_napi_disable_all(adapter); - msleep(20); netif_carrier_off(netdev); @@ -1481,9 +1495,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; - int i = 0, err; + struct i40evf_mac_filter *f; uint32_t rstat_val; + int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) @@ -1528,7 +1544,11 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1560,22 +1580,38 @@ static void i40evf_reset_task(struct work_struct *work) continue_reset: adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - i40evf_down(adapter); + i40evf_irq_disable(adapter); + + if (netif_running(adapter->netdev)) { + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + adapter->state = __I40EVF_RESETTING; /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) - dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) - dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); - adapter->aq_pending = 0; - adapter->aq_required = 0; i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1977,7 +2013,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) * * This task completes the work that was begun in probe. Due to the nature * of VF-PF communications, we may need to wait tens of milliseconds to get - * reponses back from the PF. Rather than busy-wait in probe and bog down the + * responses back from the PF. Rather than busy-wait in probe and bog down the * whole system, we'll do it in a task so we can sleep. * This task only runs during driver init. Once we've established * communications with the PF driver and set up our netdev, the watchdog @@ -2368,7 +2404,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power managment resume routine + * i40evf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. |