Diffstat (limited to 'drivers/net/ethernet/intel')
97 files changed, 5157 insertions, 2829 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 06ddd7147c7f..d55638ad8704 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -299,6 +299,17 @@ config ICE
 	  To compile this driver as a module, choose M here. The module
 	  will be called ice.
 
+config ICE_HWMON
+	bool "Intel(R) Ethernet Connection E800 Series Support HWMON support"
+	default y
+	depends on ICE && HWMON && !(ICE=y && HWMON=m)
+	help
+	  Say Y if you want to expose thermal sensor data on Intel devices.
+
+	  Some of our devices contain internal thermal sensors.
+	  This data is available via the hwmon sysfs interface and exposes
+	  the onboard sensors.
+
 config ICE_SWITCHDEV
 	bool "Switchdev Support"
 	default y
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 63c3c79380a1..23a58cada43a 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -678,11 +678,8 @@
 
 /* PCI/PCI-X/PCI-EX Config space */
 #define PCI_HEADER_TYPE_REGISTER	0x0E
-#define PCIE_LINK_STATUS		0x12
 #define PCI_HEADER_TYPE_MULTIFUNC	0x80
-#define PCIE_LINK_WIDTH_MASK		0x3F0
-#define PCIE_LINK_WIDTH_SHIFT		4
 
 #define PHY_REVISION_MASK	0xFFFFFFF0
 #define MAX_PHY_REG_ADDRESS	0x1F  /* 5 bit address bus (0-0x1F) */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 5df7ad93f3d7..8c3d9c5962f2 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 1999 - 2018 Intel Corporation. */
 
+#include <linux/bitfield.h>
+
 #include "e1000.h"
 
 /**
@@ -13,21 +15,17 @@
  **/
 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
 {
+	struct pci_dev *pdev = hw->adapter->pdev;
 	struct e1000_mac_info *mac = &hw->mac;
 	struct e1000_bus_info *bus = &hw->bus;
-	struct e1000_adapter *adapter = hw->adapter;
-	u16 pcie_link_status, cap_offset;
+	u16 pcie_link_status;
 
-	cap_offset = adapter->pdev->pcie_cap;
-	if (!cap_offset) {
+	if (!pci_pcie_cap(pdev)) {
 		bus->width = e1000_bus_width_unknown;
 	} else {
-		pci_read_config_word(adapter->pdev,
-				     cap_offset + PCIE_LINK_STATUS,
-				     &pcie_link_status);
-		bus->width = (enum e1000_bus_width)((pcie_link_status &
-						     PCIE_LINK_WIDTH_MASK) >>
-						    PCIE_LINK_WIDTH_SHIFT);
+		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &pcie_link_status);
+		bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
							     pcie_link_status);
 	}
 
 	mac->ops.set_lan_id(hw);
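The e1000e hunk above drops the driver's private link-status mask and shift constants in favor of FIELD_GET() from <linux/bitfield.h>, where the PCI_EXP_LNKSTA_NLW mask alone describes the field's position. A minimal standalone sketch of the same extraction; the GENMASK()/FIELD_GET() definitions below are simplified stand-ins for the kernel macros, and the register value is invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK() and FIELD_GET()
 * from <linux/bits.h> and <linux/bitfield.h>.
 */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
/* __builtin_ctz() recovers the shift from the mask's lowest set bit */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define PCI_EXP_LNKSTA_NLW	GENMASK(9, 4)	/* negotiated link width */

int main(void)
{
	uint16_t lnksta = 0x1041;	/* hypothetical Link Status: x4 link */

	/* old style: open-coded mask and shift constants */
	unsigned int width_old = (lnksta & 0x3F0) >> 4;
	/* new style: the mask alone carries the field position */
	unsigned int width_new = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);

	printf("width: old=%u new=%u\n", width_old, width_new);
	return 0;
}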
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 13a05604dcc0..1bc5b6c0b897 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
 	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
 }
 
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
-			  u8 *hfunc)
+static int fm10k_get_rssh(struct net_device *netdev,
+			  struct ethtool_rxfh_param *rxfh)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
+	u8 *key = rxfh->key;
 	int i, err;
 
-	if (hfunc)
-		*hfunc = ETH_RSS_HASH_TOP;
+	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
-	err = fm10k_get_reta(netdev, indir);
+	err = fm10k_get_reta(netdev, rxfh->indir);
 	if (err || !key)
 		return err;
 
@@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
 	return 0;
 }
 
-static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
-			  const u8 *key, const u8 hfunc)
+static int fm10k_set_rssh(struct net_device *netdev,
+			  struct ethtool_rxfh_param *rxfh,
+			  struct netlink_ext_ack *extack)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
 	struct fm10k_hw *hw = &interface->hw;
 	int i, err;
 
 	/* We do not allow change in unsupported parameters */
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    rxfh->hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
-	err = fm10k_set_reta(netdev, indir);
-	if (err || !key)
+	err = fm10k_set_reta(netdev, rxfh->indir);
+	if (err || !rxfh->key)
 		return err;
 
-	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
-		u32 rssrk = le32_to_cpu(*(__le32 *)key);
+	for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
+		u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);
 
 		if (interface->rssrk[i] == rssrk)
 			continue;
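The fm10k hunks above (and the i40e ones later in this series) follow the ethtool core conversion that bundles the RSS indirection table, hash key, and hash function into one struct ethtool_rxfh_param handed to the .get_rxfh/.set_rxfh ops. A minimal sketch of the pattern; struct rxfh_param_sketch below is an abbreviated, hypothetical stand-in for the real struct in include/linux/ethtool.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_RSS_HASH_TOP 1

/* Abbreviated sketch of struct ethtool_rxfh_param; the real one has
 * more fields, which is exactly why a struct beats loose arguments:
 * adding a field no longer ripples through every driver op signature.
 */
struct rxfh_param_sketch {
	uint8_t hfunc;
	uint32_t *indir;	/* indirection table, or NULL */
	uint8_t *key;		/* hash key, or NULL */
};

/* A driver op in the new style: everything travels in one struct. */
static int get_rssh(struct rxfh_param_sketch *rxfh)
{
	static const uint8_t hw_key[4] = { 0xde, 0xad, 0xbe, 0xef };

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (rxfh->indir)
		rxfh->indir[0] = 0;	/* queue 0 for the demo */
	if (rxfh->key)
		memcpy(rxfh->key, hw_key, sizeof(hw_key));
	return 0;
}

int main(void)
{
	uint32_t indir[1];
	uint8_t key[4];
	struct rxfh_param_sketch rxfh = { .indir = indir, .key = key };

	get_rssh(&rxfh);
	printf("hfunc=%u indir[0]=%u key[0]=0x%02x\n",
	       rxfh.hfunc, rxfh.indir[0], rxfh.key[0]);
	return 0;
}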
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1bf424ac3145..9b701615c7c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -24,6 +24,7 @@
 
 #define I40E_MAX_VEB			16
 
 #define I40E_MAX_NUM_DESCRIPTORS	4096
+#define I40E_MAX_NUM_DESCRIPTORS_XL710	8160
 #define I40E_MAX_CSR_SPACE		(4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS	512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE	32
@@ -33,11 +34,11 @@
 #define I40E_MIN_VSI_ALLOC		83 /* LAN, ATR, FCOE, 64 VF */
 /* max 16 qps */
 #define i40e_default_queues_per_vmdq(pf) \
-	(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
+	(test_bit(I40E_HW_CAP_RSS_AQ, (pf)->hw.caps) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF	4
 #define I40E_MAX_VF_QUEUES		16
 #define i40e_pf_get_max_q_per_tc(pf) \
-	(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
+	(test_bit(I40E_HW_CAP_128_QP_RSS, (pf)->hw.caps) ? 128 : 64)
 #define I40E_FDIR_RING_COUNT	32
 #define I40E_MAX_AQ_BUF_SIZE	4096
 #define I40E_AQ_LEN		256
@@ -78,7 +79,7 @@
 #define I40E_MAX_BW_INACTIVE_ACCUM	4 /* accumulate 4 credits max */
 
 /* driver state flags */
-enum i40e_state_t {
+enum i40e_state {
 	__I40E_TESTING,
 	__I40E_CONFIG_BUSY,
 	__I40E_CONFIG_DONE,
@@ -126,7 +127,7 @@ enum i40e_state_t {
 	BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
 
 /* VSI state flags */
-enum i40e_vsi_state_t {
+enum i40e_vsi_state {
 	__I40E_VSI_DOWN,
 	__I40E_VSI_NEEDS_RESTART,
 	__I40E_VSI_SYNCING_FILTERS,
@@ -138,6 +139,60 @@ enum i40e_vsi_state_t {
 	__I40E_VSI_STATE_SIZE__,
 };
 
+enum i40e_pf_flags {
+	I40E_FLAG_MSI_ENA,
+	I40E_FLAG_MSIX_ENA,
+	I40E_FLAG_RSS_ENA,
+	I40E_FLAG_VMDQ_ENA,
+	I40E_FLAG_SRIOV_ENA,
+	I40E_FLAG_DCB_CAPABLE,
+	I40E_FLAG_DCB_ENA,
+	I40E_FLAG_FD_SB_ENA,
+	I40E_FLAG_FD_ATR_ENA,
+	I40E_FLAG_MFP_ENA,
+	I40E_FLAG_HW_ATR_EVICT_ENA,
+	I40E_FLAG_VEB_MODE_ENA,
+	I40E_FLAG_VEB_STATS_ENA,
+	I40E_FLAG_LINK_POLLING_ENA,
+	I40E_FLAG_TRUE_PROMISC_ENA,
+	I40E_FLAG_LEGACY_RX_ENA,
+	I40E_FLAG_PTP_ENA,
+	I40E_FLAG_IWARP_ENA,
+	I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+	I40E_FLAG_SOURCE_PRUNING_DIS,
+	I40E_FLAG_TC_MQPRIO_ENA,
+	I40E_FLAG_FD_SB_INACTIVE,
+	I40E_FLAG_FD_SB_TO_CLOUD_FILTER,
+	I40E_FLAG_FW_LLDP_DIS,
+	I40E_FLAG_RS_FEC,
+	I40E_FLAG_BASE_R_FEC,
+	/* TOTAL_PORT_SHUTDOWN_ENA
+	 * Allows to physically disable the link on the NIC's port.
+	 * If enabled, (after link down request from the OS)
+	 * no link, traffic or led activity is possible on that port.
+	 *
+	 * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA is set, the
+	 * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA must be explicitly forced
+	 * to true and cannot be disabled by system admin at that time.
+	 * The functionalities are exclusive in terms of configuration, but
+	 * they also have similar behavior (allowing to disable physical
+	 * link of the port), with following differences:
+	 * - LINK_DOWN_ON_CLOSE_ENA is configurable at host OS run-time and
+	 *   is supported by whole family of 7xx Intel Ethernet Controllers
+	 * - TOTAL_PORT_SHUTDOWN_ENA may be enabled only before OS loads
+	 *   (in BIOS) only if motherboard's BIOS and NIC's FW has support of it
+	 * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought
+	 *   down by sending phy_type=0 to NIC's FW
+	 * - when TOTAL_PORT_SHUTDOWN_ENA is used, phy_type is not altered,
+	 *   instead the link is being brought down by clearing
+	 *   bit (I40E_AQ_PHY_ENABLE_LINK) in abilities field of
+	 *   i40e_aq_set_phy_config structure
+	 */
+	I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
+	I40E_FLAG_VF_VLAN_PRUNING_ENA,
+	I40E_PF_FLAGS_NBITS,	/* must be last */
+};
+
 enum i40e_interrupt_policy {
 	I40E_INTERRUPT_BEST_CASE,
 	I40E_INTERRUPT_MEDIUM,
@@ -413,9 +468,7 @@ struct i40e_pf {
 	struct i40e_hw hw;
 	DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
 	struct msix_entry *msix_entries;
-	bool fc_autoneg_status;
 
-	u16 eeprom_version;
 	u16 num_vmdq_vsis;	/* num vmdq vsis this PF has set up */
 	u16 num_vmdq_qps;	/* num queue pairs per vmdq pool */
 	u16 num_vmdq_msix;	/* num queue vectors per vmdq pool */
@@ -431,7 +484,6 @@ struct i40e_pf {
 	u16 rss_size_max;	/* HW defined max RSS queues */
 	u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
 	u16 num_alloc_vsi;	/* num VSIs this driver supports */
-	u8 atr_sample_rate;
 	bool wol_en;
 
 	struct hlist_head fdir_filter_list;
@@ -469,89 +521,16 @@ struct i40e_pf {
 	struct hlist_head cloud_filter_list;
 	u16 num_cloud_filters;
 
-	enum i40e_interrupt_policy int_policy;
 	u16 rx_itr_default;
 	u16 tx_itr_default;
 	u32 msg_enable;
 	char int_name[I40E_INT_NAME_STR_LEN];
-	u16 adminq_work_limit; /* num of admin receive queue desc to process */
 	unsigned long service_timer_period;
 	unsigned long service_timer_previous;
 	struct timer_list service_timer;
 	struct work_struct service_task;
 
-	u32 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE			BIT(0)
-#define I40E_HW_128_QP_RSS_CAPABLE		BIT(1)
-#define I40E_HW_ATR_EVICT_CAPABLE		BIT(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE		BIT(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT(4)
-#define I40E_HW_NO_PCI_LINK_CHECK		BIT(5)
-#define I40E_HW_100M_SGMII_CAPABLE		BIT(6)
-#define I40E_HW_NO_DCB_SUPPORT			BIT(7)
-#define I40E_HW_USE_SET_LLDP_MIB		BIT(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE		BIT(9)
-#define I40E_HW_PTP_L4_CAPABLE			BIT(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE		BIT(11)
-#define I40E_HW_HAVE_CRT_RETIMER		BIT(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE		BIT(14)
-#define I40E_HW_PHY_CONTROLS_LEDS		BIT(15)
-#define I40E_HW_STOP_FW_LLDP			BIT(16)
-#define I40E_HW_PORT_ID_VALID			BIT(17)
-#define I40E_HW_RESTART_AUTONEG			BIT(18)
-
-	u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
-#define I40E_FLAG_MSI_ENABLED			BIT(1)
-#define I40E_FLAG_MSIX_ENABLED			BIT(2)
-#define I40E_FLAG_RSS_ENABLED			BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED			BIT(4)
-#define I40E_FLAG_SRIOV_ENABLED			BIT(5)
-#define I40E_FLAG_DCB_CAPABLE			BIT(6)
-#define I40E_FLAG_DCB_ENABLED			BIT(7)
-#define I40E_FLAG_FD_SB_ENABLED			BIT(8)
-#define I40E_FLAG_FD_ATR_ENABLED		BIT(9)
-#define I40E_FLAG_MFP_ENABLED			BIT(10)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT(11)
-#define I40E_FLAG_VEB_MODE_ENABLED		BIT(12)
-#define I40E_FLAG_VEB_STATS_ENABLED		BIT(13)
-#define I40E_FLAG_LINK_POLLING_ENABLED		BIT(14)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT(15)
-#define I40E_FLAG_LEGACY_RX			BIT(16)
-#define I40E_FLAG_PTP				BIT(17)
-#define I40E_FLAG_IWARP_ENABLED			BIT(18)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT(19)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED	BIT(20)
-#define I40E_FLAG_TC_MQPRIO			BIT(21)
-#define I40E_FLAG_FD_SB_INACTIVE		BIT(22)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT(23)
-#define I40E_FLAG_DISABLE_FW_LLDP		BIT(24)
-#define I40E_FLAG_RS_FEC			BIT(25)
-#define I40E_FLAG_BASE_R_FEC			BIT(26)
-/* TOTAL_PORT_SHUTDOWN
- * Allows to physically disable the link on the NIC's port.
- * If enabled, (after link down request from the OS)
- * no link, traffic or led activity is possible on that port.
- *
- * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the
- * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
- * and cannot be disabled by system admin at that time.
- * The functionalities are exclusive in terms of configuration, but they also
- * have similar behavior (allowing to disable physical link of the port),
- * with following differences:
- * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
- *   supported by whole family of 7xx Intel Ethernet Controllers
- * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS)
- *   only if motherboard's BIOS and NIC's FW has support of it
- * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down
- *   by sending phy_type=0 to NIC's FW
- * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead
- *   the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK)
- *   in abilities field of i40e_aq_set_phy_config structure
- */
-#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(27)
-#define I40E_FLAG_VF_VLAN_PRUNING		BIT(28)
-
+	DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
 	struct i40e_hw_port_stats stats;
@@ -559,7 +538,6 @@ struct i40e_pf {
 	u32 tx_timeout_count;
 	u32 tx_timeout_recovery_level;
 	unsigned long tx_timeout_last_recovery;
-	u32 tx_sluggish_count;
 	u32 hw_csum_rx_error;
 	u32 led_status;
 	u16 corer_count; /* Core reset count */
@@ -581,17 +559,13 @@ struct i40e_pf {
 	struct i40e_lump_tracking *irq_pile;
 
 	/* switch config info */
-	u16 pf_seid;
 	u16 main_vsi_seid;
 	u16 mac_seid;
-	struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
 	struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
 	bool cur_promisc;
 
-	u16 instance; /* A unique number per i40e_pf instance in the system */
-
 	/* sr-iov config info */
 	struct i40e_vf *vf;
 	int num_alloc_vfs;	/* actual number of VFs allocated */
@@ -685,9 +659,7 @@ struct i40e_pf {
 	unsigned long ptp_tx_start;
 	struct hwtstamp_config tstamp_config;
 	struct timespec64 ptp_prev_hw_time;
-	struct work_struct ptp_pps_work;
 	struct work_struct ptp_extts0_work;
-	struct work_struct ptp_extts1_work;
 	ktime_t ptp_reset_start;
 	struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
 	u32 ptp_adj_mult;
@@ -695,10 +667,7 @@ struct i40e_pf {
 	u32 tx_hwtstamp_skipped;
 	u32 rx_hwtstamp_cleared;
 	u32 latch_event_flags;
-	u64 ptp_pps_start;
-	u32 pps_delay;
 	spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
-	struct ptp_pin_desc ptp_pin[3];
 	unsigned long latch_events[4];
 	bool ptp_tx;
 	bool ptp_rx;
@@ -711,7 +680,6 @@ struct i40e_pf {
 	u32 fd_inv;
 	u16 phy_led_val;
 
-	u16 override_q_count;
 	u16 last_sw_conf_flags;
 	u16 last_sw_conf_valid_flags;
 	/* List to keep previous DDP profiles to be rolled back in the future */
@@ -1267,7 +1235,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
 {
-	return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+	return test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
 }
 
#ifdef CONFIG_I40E_DCB
@@ -1301,7 +1269,7 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf);
 int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
 
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
 
 static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
 {
@@ -1321,13 +1289,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
  * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
  * @pf: pointer to a pf.
  *
- * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ * Check and return state of flag I40E_FLAG_TC_MQPRIO.
  *
- * Return: I40E_FLAG_TC_MQPRIO set state.
+ * Return: true/false if I40E_FLAG_TC_MQPRIO is set or not
  **/
-static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+static inline bool i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
 {
-	return pf->flags & I40E_FLAG_TC_MQPRIO;
+	return test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
 }
 
 /**
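The i40e.h diff above replaces the fixed-width u32 pf->flags word (and its BIT() defines) with an enum of bit numbers plus DECLARE_BITMAP(), so the flag set can grow past 32 entries and is read and written with test_bit()/set_bit()/clear_bit(). A standalone sketch of that pattern; the bitmap helpers here are simplified, non-atomic stand-ins for the ones in <linux/bitmap.h>, and the flag names are invented for the demo:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static inline void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline int test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Flags are enum members (bit numbers), not BIT() masks, so the set
 * can grow indefinitely without changing any storage type.
 */
enum pf_flags {
	FLAG_MSIX_ENA,
	FLAG_RSS_ENA,
	FLAG_PTP_ENA,
	PF_FLAGS_NBITS,		/* must be last */
};

int main(void)
{
	DECLARE_BITMAP(flags, PF_FLAGS_NBITS) = { 0 };

	set_bit(FLAG_RSS_ENA, flags);
	printf("rss=%d ptp=%d\n",
	       test_bit(FLAG_RSS_ENA, flags), test_bit(FLAG_PTP_ENA, flags));
	return 0;
}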
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9ce6e633cc2f..f73f5930fc58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -9,40 +9,6 @@
 static void i40e_resume_aq(struct i40e_hw *hw);
 
 /**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
-	/* set head and tail registers in our local struct */
-	if (i40e_is_vf(hw)) {
-		hw->aq.asq.tail = I40E_VF_ATQT1;
-		hw->aq.asq.head = I40E_VF_ATQH1;
-		hw->aq.asq.len = I40E_VF_ATQLEN1;
-		hw->aq.asq.bal = I40E_VF_ATQBAL1;
-		hw->aq.asq.bah = I40E_VF_ATQBAH1;
-		hw->aq.arq.tail = I40E_VF_ARQT1;
-		hw->aq.arq.head = I40E_VF_ARQH1;
-		hw->aq.arq.len = I40E_VF_ARQLEN1;
-		hw->aq.arq.bal = I40E_VF_ARQBAL1;
-		hw->aq.arq.bah = I40E_VF_ARQBAH1;
-	} else {
-		hw->aq.asq.tail = I40E_PF_ATQT;
-		hw->aq.asq.head = I40E_PF_ATQH;
-		hw->aq.asq.len = I40E_PF_ATQLEN;
-		hw->aq.asq.bal = I40E_PF_ATQBAL;
-		hw->aq.asq.bah = I40E_PF_ATQBAH;
-		hw->aq.arq.tail = I40E_PF_ARQT;
-		hw->aq.arq.head = I40E_PF_ARQH;
-		hw->aq.arq.len = I40E_PF_ARQLEN;
-		hw->aq.arq.bal = I40E_PF_ARQBAL;
-		hw->aq.arq.bah = I40E_PF_ARQBAH;
-	}
-}
-
-/**
  * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
  * @hw: pointer to the hardware structure
  **/
@@ -267,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
-	wr32(hw, hw->aq.asq.head, 0);
-	wr32(hw, hw->aq.asq.tail, 0);
+	wr32(hw, I40E_PF_ATQH, 0);
+	wr32(hw, I40E_PF_ATQT, 0);
 
 	/* set starting point */
-	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+	wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 				  I40E_PF_ATQLEN_ATQENABLE_MASK));
-	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
-	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+	wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
+	wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
 	/* Check one register to verify that config was applied */
-	reg = rd32(hw, hw->aq.asq.bal);
+	reg = rd32(hw, I40E_PF_ATQBAL);
 	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 		ret_code = -EIO;
 
@@ -296,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
-	wr32(hw, hw->aq.arq.head, 0);
-	wr32(hw, hw->aq.arq.tail, 0);
+	wr32(hw, I40E_PF_ARQH, 0);
+	wr32(hw, I40E_PF_ARQT, 0);
 
 	/* set starting point */
-	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+	wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 				  I40E_PF_ARQLEN_ARQENABLE_MASK));
-	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
-	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+	wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
+	wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
 	/* Update tail in the HW to post pre-allocated buffers */
-	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+	wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);
 
 	/* Check one register to verify that config was applied */
-	reg = rd32(hw, hw->aq.arq.bal);
+	reg = rd32(hw, I40E_PF_ARQBAL);
 	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 		ret_code = -EIO;
 
@@ -452,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
 	}
 
 	/* Stop firmware AdminQ processing */
-	wr32(hw, hw->aq.asq.head, 0);
-	wr32(hw, hw->aq.asq.tail, 0);
-	wr32(hw, hw->aq.asq.len, 0);
-	wr32(hw, hw->aq.asq.bal, 0);
-	wr32(hw, hw->aq.asq.bah, 0);
+	wr32(hw, I40E_PF_ATQH, 0);
+	wr32(hw, I40E_PF_ATQT, 0);
+	wr32(hw, I40E_PF_ATQLEN, 0);
+	wr32(hw, I40E_PF_ATQBAL, 0);
+	wr32(hw, I40E_PF_ATQBAH, 0);
 
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
@@ -486,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
 	}
 
 	/* Stop firmware AdminQ processing */
-	wr32(hw, hw->aq.arq.head, 0);
-	wr32(hw, hw->aq.arq.tail, 0);
-	wr32(hw, hw->aq.arq.len, 0);
-	wr32(hw, hw->aq.arq.bal, 0);
-	wr32(hw, hw->aq.arq.bah, 0);
+	wr32(hw, I40E_PF_ARQH, 0);
+	wr32(hw, I40E_PF_ARQT, 0);
+	wr32(hw, I40E_PF_ARQLEN, 0);
+	wr32(hw, I40E_PF_ARQBAL, 0);
+	wr32(hw, I40E_PF_ARQBAH, 0);
 
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
@@ -503,44 +469,76 @@ shutdown_arq_out:
 }
 
 /**
- * i40e_set_hw_flags - set HW flags
+ * i40e_set_hw_caps - set HW flags
  * @hw: pointer to the hardware structure
 **/
-static void i40e_set_hw_flags(struct i40e_hw *hw)
+static void i40e_set_hw_caps(struct i40e_hw *hw)
 {
-	struct i40e_adminq_info *aq = &hw->aq;
-
-	hw->flags = 0;
+	bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);
 
 	switch (hw->mac.type) {
 	case I40E_MAC_XL710:
-		if (aq->api_maj_ver > 1 ||
-		    (aq->api_maj_ver == 1 &&
-		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
-			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
-			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+		if (i40e_is_aq_api_ver_ge(hw, 1,
+					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
+			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
 			/* The ability to RX (not drop) 802.1ad frames */
-			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+			set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+		}
+		if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
+			/* Supported in FW API version higher than 1.4 */
+			set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+		}
+		if (i40e_is_fw_ver_lt(hw, 4, 33)) {
+			set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
+			/* No DCB support for FW < v4.33 */
+			set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
+		}
+		if (i40e_is_fw_ver_lt(hw, 4, 3)) {
+			/* Disable FW LLDP if FW < v4.3 */
+			set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
+		}
+		if (i40e_is_fw_ver_ge(hw, 4, 40)) {
+			/* Use the FW Set LLDP MIB API if FW >= v4.40 */
+			set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+		}
+		if (i40e_is_fw_ver_ge(hw, 6, 0)) {
+			/* Enable PTP L4 if FW > v6.0 */
+			set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
 		}
 		break;
 	case I40E_MAC_X722:
-		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
-			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+		set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
+		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+		set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
+		set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
+		set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+		set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
+		set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
+		set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
+		set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+		set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+		set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
+		set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
+		set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);
+
+		if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
+		    I40E_FDEVICT_PCTYPE_DEFAULT) {
+			hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+			clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+		}
 
-		if (aq->api_maj_ver > 1 ||
-		    (aq->api_maj_ver == 1 &&
-		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
-			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+		if (i40e_is_aq_api_ver_ge(hw, 1,
+					  I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
 
-		if (aq->api_maj_ver > 1 ||
-		    (aq->api_maj_ver == 1 &&
-		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
-			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+		if (i40e_is_aq_api_ver_ge(hw, 1,
+					  I40E_MINOR_VER_GET_LINK_INFO_X722))
+			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
 
-		if (aq->api_maj_ver > 1 ||
-		    (aq->api_maj_ver == 1 &&
-		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
-			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+		if (i40e_is_aq_api_ver_ge(hw, 1,
+					  I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+			set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);
 
 		fallthrough;
 	default:
@@ -548,22 +546,18 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
 	}
 
 	/* Newer versions of firmware require lock when reading the NVM */
-	if (aq->api_maj_ver > 1 ||
-	    (aq->api_maj_ver == 1 &&
-	     aq->api_min_ver >= 5))
-		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
-	if (aq->api_maj_ver > 1 ||
-	    (aq->api_maj_ver == 1 &&
-	     aq->api_min_ver >= 8)) {
-		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
-		hw->flags |= I40E_HW_FLAG_DROP_MODE;
-	}
+	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
+		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+
+	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+	if (i40e_is_aq_api_ver_ge(hw, 1, 7))
+		set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+
+	if (i40e_is_aq_api_ver_ge(hw, 1, 8))
+		set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);
 
-	if (aq->api_maj_ver > 1 ||
-	    (aq->api_maj_ver == 1 &&
-	     aq->api_min_ver >= 9))
-		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+	if (i40e_is_aq_api_ver_ge(hw, 1, 9))
+		set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
 }
 
 /**
@@ -593,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
 		goto init_adminq_exit;
 	}
 
-	/* Set up register offsets */
-	i40e_adminq_init_regs(hw);
-
 	/* setup ASQ command write back timeout */
 	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
 
@@ -633,7 +624,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
 	/* Some features were introduced in different FW API version
 	 * for different MAC type.
 	 */
-	i40e_set_hw_flags(hw);
+	i40e_set_hw_caps(hw);
 
 	/* get the NVM version info */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
@@ -648,25 +639,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
 			   &oem_lo);
 	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
-	if (hw->mac.type == I40E_MAC_XL710 &&
-	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
-		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
-		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
-	}
-	if (hw->mac.type == I40E_MAC_X722 &&
-	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
-		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
-	}
-
-	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
-	if (hw->aq.api_maj_ver > 1 ||
-	    (hw->aq.api_maj_ver == 1 &&
-	     hw->aq.api_min_ver >= 7))
-		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
-	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+	if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
 		ret_code = -EIO;
 		goto init_adminq_free_arq;
 	}
@@ -723,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
 	desc = I40E_ADMINQ_DESC(*asq, ntc);
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 
-	while (rd32(hw, hw->aq.asq.head) != ntc) {
+	while (rd32(hw, I40E_PF_ATQH) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
-			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
@@ -759,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
 	 */
-	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+	return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
 }
 
@@ -800,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 
 	hw->aq.asq_last_status = I40E_AQ_RC_OK;
 
-	val = rd32(hw, hw->aq.asq.head);
+	val = rd32(hw, I40E_PF_ATQH);
 	if (val >= hw->aq.num_asq_entries) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: head overrun at %d\n", val);
@@ -892,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
 		hw->aq.asq.next_to_use = 0;
 	if (!details->postpone)
-		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+		wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);
 
 	/* if cmd_details are not defined or async flag is not set,
 	 * we need to wait for desc write back
@@ -952,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
-		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+		if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: AQ Critical error.\n");
 			status = -EIO;
@@ -1106,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 	}
 
 	/* set next_to_use to head */
-	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+	ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = -EALREADY;
@@ -1154,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
 
 	/* set tail = the last cleaned desc index. */
-	wr32(hw, hw->aq.arq.tail, ntc);
+	wr32(hw, I40E_PF_ARQT, ntc);
 	/* ntc is updated to tail + 1 */
 	ntc++;
 	if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 80125bea80a2..ee86d2c53079 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
 	/* used for interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
-
-	/* used for queue tracking */
-	u32 head;
-	u32 tail;
-	u32 len;
-	u32 bah;
-	u32 bal;
 };
 
 /* ASQ transaction details */
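Throughout the adminq changes above, open-coded comparisons of the form "api_maj_ver > 1 || (api_maj_ver == 1 && api_min_ver >= N)" give way to i40e_is_aq_api_ver_ge()/i40e_is_fw_ver_lt() style helpers. A tiny standalone sketch of the comparison logic (helper names and signatures are simplified; the real helpers take a struct i40e_hw and read the cached AQ/FW versions):

#include <stdbool.h>
#include <stdio.h>

static bool ver_ge(unsigned int cur_maj, unsigned int cur_min,
		   unsigned int maj, unsigned int min)
{
	return cur_maj > maj || (cur_maj == maj && cur_min >= min);
}

static bool ver_lt(unsigned int cur_maj, unsigned int cur_min,
		   unsigned int maj, unsigned int min)
{
	return !ver_ge(cur_maj, cur_min, maj, min);	/* lt == !ge */
}

int main(void)
{
	/* API 1.7 is >= 1.5 but < 2.0 */
	printf("%d %d\n", ver_ge(1, 7, 1, 5), ver_lt(1, 7, 2, 0));
	return 0;
}

Note how init_adminq's strict major-version check ("api_maj_ver > MAJOR") is re-expressed as ge(MAJOR + 1, 0), which is the same predicate stated through the single helper.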
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d7e24d661724..bd52b73cf61f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -195,11 +195,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 **/
 bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
-	if (hw->aq.asq.len)
-		return !!(rd32(hw, hw->aq.asq.len) &
-			  I40E_PF_ATQLEN_ATQENABLE_MASK);
-	else
+	/* Check if the queue is initialized */
+	if (!hw->aq.asq.count)
 		return false;
+
+	return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
 }
 
 /**
@@ -1374,8 +1374,8 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 
 	if (report_init) {
 		if (hw->mac.type == I40E_MAC_XL710 &&
-		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+		    i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
 			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 		} else {
 			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
@@ -1645,12 +1645,11 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
 	else
 		hw_link_info->lse_enable = false;
 
-	if ((hw->mac.type == I40E_MAC_XL710) &&
-	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
-	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+	if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
+	    hw_link_info->phy_type == 0xE)
 		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
 
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
 	    hw->mac.type != I40E_MAC_X722) {
 		__le32 tmp;
 
@@ -1750,21 +1749,6 @@ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
 }
 
 /**
- * i40e_is_aq_api_ver_ge
- * @aq: pointer to AdminQ info containing HW API version to compare
- * @maj: API major value
- * @min: API minor value
- *
- * Assert whether current HW API version is greater/equal than provided.
- **/
-static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
-				  u16 min)
-{
-	return (aq->api_maj_ver > maj ||
-		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
-}
-
-/**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
  * @vsi_ctx: pointer to a vsi context struct
@@ -1890,14 +1874,14 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 
 	if (set) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+		if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
 			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
 	}
 
 	cmd->promiscuous_flags = cpu_to_le16(flags);
 
 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
 		cmd->valid_flags |=
 			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 
@@ -2000,13 +1984,13 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
 
 	if (enable) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+		if (i40e_is_aq_api_ver_ge(hw, 1, 5))
 			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
 	}
 
 	cmd->promiscuous_flags = cpu_to_le16(flags);
 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
 		cmd->valid_flags |=
 			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 	cmd->seid = cpu_to_le16(seid);
@@ -2253,7 +2237,7 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
 	scfg->flags = cpu_to_le16(flags);
 	scfg->valid_flags = cpu_to_le16(valid_flags);
 	scfg->mode = mode;
-	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+	if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
 		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
 		scfg->first_tag = cpu_to_le16(hw->first_tag);
 		scfg->second_tag = cpu_to_le16(hw->second_tag);
@@ -3637,7 +3621,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
 		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
 	int status;
 
-	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+	if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
 		i40e_debug(hw, I40E_DEBUG_ALL,
 			   "Restore LLDP not supported by current FW version.\n");
 		return -ENODEV;
@@ -3680,7 +3664,7 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
 
 	if (persist) {
-		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+		if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
 			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
 		else
 			i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3713,7 +3697,7 @@ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
 	cmd->command = I40E_AQ_LLDP_AGENT_START;
 
 	if (persist) {
-		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+		if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
 			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
 		else
 			i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3741,7 +3725,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
 	int status;
 
-	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+	if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
 		return -ENODEV;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
@@ -5043,7 +5027,7 @@ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val)
 	u32 i;
 
 	*reg_val = 0;
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
 		status = i40e_aq_get_phy_register(hw,
 						  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5076,7 +5060,7 @@ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val)
 	int status;
 	u32 i;
 
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
 		status = i40e_aq_set_phy_register(hw,
 						  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5115,7 +5099,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
 	u8 port_num;
 	u32 i;
 
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
 		status = i40e_aq_get_phy_register(hw,
 						  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5238,14 +5222,14 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
 **/
 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
 {
-	bool use_register;
+	bool use_register = false;
 	int status = 0;
 	int retry = 5;
 	u32 val = 0;
 
-	use_register = (((hw->aq.api_maj_ver == 1) &&
-			 (hw->aq.api_min_ver < 5)) ||
-			(hw->mac.type == I40E_MAC_X722));
+	if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+		use_register = true;
+
 	if (!use_register) {
do_retry:
 		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5300,13 +5284,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
 **/
 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
 {
-	bool use_register;
+	bool use_register = false;
 	int status = 0;
 	int retry = 5;
 
-	use_register = (((hw->aq.api_maj_ver == 1) &&
-			 (hw->aq.api_min_ver < 5)) ||
-			(hw->mac.type == I40E_MAC_X722));
+	if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+		use_register = true;
+
 	if (!use_register) {
do_retry:
 		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5335,7 +5319,7 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
 					  struct i40e_aqc_phy_register_access *cmd)
 {
 	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
-		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+		if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps))
 			cmd->cmd_flags |=
 				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
 				((mdio_num <<
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 68602fc375f6..498728e16a37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -804,14 +804,11 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
 	int ret = 0;
 
 	/* If Firmware version < v4.33 on X710/XL710, IEEE only */
-	if ((hw->mac.type == I40E_MAC_XL710) &&
-	    (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
-	      (hw->aq.fw_maj_ver < 4)))
+	if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 33))
 		return i40e_get_ieee_dcb_config(hw);
 
 	/* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
-	if ((hw->mac.type == I40E_MAC_XL710) &&
-	    ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+	if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_eq(hw, 4, 33)) {
 		ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
 						 sizeof(cee_v1_cfg), NULL);
 		if (!ret) {
@@ -877,7 +874,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 		return -EOPNOTSUPP;
 
 	/* Read LLDP NVM area */
-	if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+	if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
 		u8 offset = 0;
 
 		if (hw->mac.type == I40E_MAC_XL710)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 077a95dad32c..4721845fda6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -310,8 +310,8 @@ static u8 i40e_dcbnl_getstate(struct net_device *netdev)
 	struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
 
 	dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
-		!!(pf->flags & I40E_FLAG_DCB_ENABLED));
-	return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+		test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
+	return test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0;
 }
 
 /**
@@ -331,19 +331,19 @@ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
 		return ret;
 
 	dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
-		state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+		state, test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
 
 	/* Nothing to do */
-	if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+	if (!state == !test_bit(I40E_FLAG_DCB_ENA, pf->flags))
 		return ret;
 
 	if (i40e_is_sw_dcb(pf)) {
 		if (state) {
-			pf->flags |= I40E_FLAG_DCB_ENABLED;
+			set_bit(I40E_FLAG_DCB_ENA, pf->flags);
 			memcpy(&pf->hw.desired_dcbx_config,
 			       &pf->hw.local_dcbx_config,
 			       sizeof(struct i40e_dcbx_config));
 		} else {
-			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+			clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
 		}
 	} else {
 		/* Cannot directly manipulate FW LLDP Agent */
@@ -653,7 +653,7 @@ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
 {
 	struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
 
-	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+	if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
 		return I40E_DCBNL_STATUS_ERROR;
 
 	switch (capid) {
@@ -693,7 +693,7 @@ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
 {
 	struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
 
-	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+	if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
 		return -EINVAL;
 
 	*num = I40E_MAX_TRAFFIC_CLASS;
@@ -827,15 +827,12 @@ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
 					u8 *perm_addr)
 {
 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
-	int i, j;
+	int i;
 
 	memset(perm_addr, 0xff, MAX_ADDR_LEN);
 
 	for (i = 0; i < dev->addr_len; i++)
 		perm_addr[i] = pf->hw.mac.perm_addr[i];
-
-	for (j = 0; j < dev->addr_len; j++, i++)
-		perm_addr[i] = pf->hw.mac.san_addr[j];
 }
 
 static const struct dcbnl_rtnl_ops dcbnl_ops = {
@@ -891,11 +888,11 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
 		return;
 
 	/* DCB not enabled */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+	if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
 		return;
 
 	/* MFP mode but not an iSCSI PF so return */
-	if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
+	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(hw->func_caps.iscsi))
 		return;
 
 	dcbxcfg = &hw->local_dcbx_config;
@@ -1002,7 +999,7 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
 	int i;
 
 	/* MFP mode but not an iSCSI PF so return */
-	if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi))
 		return;
 
 	for (i = 0; i < old_cfg->numapps; i++) {
@@ -1025,7 +1022,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
 
 	/* Not DCB capable */
-	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+	if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
 		return;
 
 	dev->dcbnl_ops = &dcbnl_ops;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
index 27ebc72d8bfe..e9871dfb32bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debug.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
@@ -37,6 +37,7 @@ struct i40e_hw;
 struct device *i40e_hw_to_dev(struct i40e_hw *hw);
 
 #define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
+#define hw_warn(hw, S, A...) dev_warn(i40e_hw_to_dev(hw), S, ##A)
 
 #define i40e_debug(h, m, s, ...)	\
 do {					\
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 999c9708def5..ef70ddbe9c2f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 " state[%d] = %08lx\n",
 			 i, vsi->state[i]);
 	if (vsi == pf->vsi[pf->lan_vsi])
-		dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+		dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
 			 pf->hw.mac.addr,
-			 pf->hw.mac.san_addr,
 			 pf->hw.mac.port_addr);
 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
 		dev_info(&pf->pdev->dev,
@@ -820,8 +819,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		/* By default we are in VEPA mode, if this is the first VF/VMDq
 		 * VSI to be added switch to VEB mode.
 		 */
-		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
 			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
 		}
 
@@ -1029,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				 "emp reset count: %d\n", pf->empr_count);
 			dev_info(&pf->pdev->dev,
 				 "pf reset count: %d\n", pf->pfr_count);
-			dev_info(&pf->pdev->dev,
-				 "pf tx sluggish count: %d\n",
-				 pf->tx_sluggish_count);
 		} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
 			struct i40e_aqc_query_port_ets_config_resp *bw_data;
 			struct i40e_dcbx_config *cfg =
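In the i40e_ethtool.c diff that follows, struct i40e_priv_flags carries a bit number (u8 bitno) instead of a u64 mask, and i40e_get_priv_flags()/i40e_set_priv_flags() walk the table with test_bit() and bitmap copies against pf->flags. A standalone sketch of that table-driven translation between the ethtool uAPI flag word and driver-private bits; the flag names and the single unsigned long bitmap here are illustrative only:

#include <stdio.h>

enum pf_flags { FLAG_MFP_ENA, FLAG_LINK_POLLING_ENA, PF_FLAGS_NBITS };

struct priv_flag {
	const char *name;
	unsigned char bitno;	/* was: a u64 mask built with BIT() */
	int read_only;
};

static const struct priv_flag priv_flags[] = {
	{ "MFP",         FLAG_MFP_ENA,          1 },
	{ "LinkPolling", FLAG_LINK_POLLING_ENA, 0 },
};

int main(void)
{
	unsigned long flags = 1UL << FLAG_LINK_POLLING_ENA;
	unsigned int i, ret = 0;

	/* get_priv_flags: the i-th uAPI bit reports the table's i-th entry */
	for (i = 0; i < sizeof(priv_flags) / sizeof(priv_flags[0]); i++)
		if (flags & (1UL << priv_flags[i].bitno))
			ret |= 1U << i;

	printf("priv flags word: %#x\n", ret);	/* 0x2: LinkPolling set */
	return 0;
}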
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0), I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0), I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), I40E_PRIV_FLAG("vf-vlan-pruning", - I40E_FLAG_VF_VLAN_PRUNING, 0), + I40E_FLAG_VF_VLAN_PRUNING_ENA, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) @@ -466,7 +466,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* Private flags with a global effect, restricted to PF 0 */ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = { I40E_PRIV_FLAG("vf-true-promisc-support", - I40E_FLAG_TRUE_PROMISC_SUPPORT, 0), + I40E_FLAG_TRUE_PROMISC_ENA, 0), }; #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags) @@ -502,7 +502,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); - if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { + if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -601,7 +601,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, 10000baseKX4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR && - !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) @@ -609,7 +609,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, 10000baseKR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX && - !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) @@ -917,7 +917,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); - if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { + if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); if (hw_link_info->requested_speeds & @@ -1488,12 +1488,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int status = 0; - u32 flags = 0; int err = 0; - flags = READ_ONCE(pf->flags); - i40e_set_fec_in_flags(fec_cfg, &flags); - /* Get the current phy config */ memset(&abilities, 0, sizeof(abilities)); status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -1525,7 +1521,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) err = -EAGAIN; goto done; } - pf->flags = flags; + i40e_set_fec_in_flags(fec_cfg, pf->flags); status = i40e_update_link_info(hw); if (status) /* debug level message only due to relation to the link @@ -1599,7 +1595,7 @@ static int i40e_set_fec_param(struct net_device *netdev, return -EPERM; if (hw->mac.type == I40E_MAC_X722 && - !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) { + !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) { netdev_err(netdev, "Setting FEC encoding not supported by firmware. 
Please update the NVM image.\n"); return -EOPNOTSUPP; } @@ -2015,6 +2011,18 @@ static void i40e_get_drvinfo(struct net_device *netdev, drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; } +static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + switch (hw->mac.type) { + case I40E_MAC_XL710: + return I40E_MAX_NUM_DESCRIPTORS_XL710; + default: + return I40E_MAX_NUM_DESCRIPTORS; + } +} + static void i40e_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -2024,8 +2032,8 @@ static void i40e_get_ringparam(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS; - ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->rx_max_pending = i40e_get_max_num_descriptors(pf); + ring->tx_max_pending = i40e_get_max_num_descriptors(pf); ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = vsi->rx_rings[0]->count; @@ -2050,12 +2058,12 @@ static int i40e_set_ringparam(struct net_device *netdev, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { + u32 new_rx_count, new_tx_count, max_num_descriptors; struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u32 new_rx_count, new_tx_count; u16 tx_alloc_queue_pairs; int timeout = 50; int i, err = 0; @@ -2063,14 +2071,15 @@ static int i40e_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || + max_num_descriptors = i40e_get_max_num_descriptors(pf); + if (ring->tx_pending > max_num_descriptors || ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || - ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->rx_pending > max_num_descriptors || ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", ring->tx_pending, ring->rx_pending, - I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); + I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors); return -EINVAL; } @@ -2419,7 +2428,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, veb_stats = ((pf->lan_veb != I40E_NO_VEB) && (pf->lan_veb < I40E_MAX_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)); if (veb_stats) { veb = pf->veb[pf->lan_veb]; @@ -2514,13 +2523,11 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) u8 *p = data; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, "%s", - i40e_gstrings_priv_flags[i].flag_string); + ethtool_puts(&p, i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); + ethtool_puts(&p, i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, @@ -2548,7 +2555,7 @@ static int i40e_get_ts_info(struct net_device *dev, struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* only report HW timestamping if PTP is enabled */ - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = 
SOF_TIMESTAMPING_TX_SOFTWARE | @@ -2570,7 +2577,7 @@ static int i40e_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); - if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) + if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | @@ -2819,10 +2826,10 @@ static int i40e_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { pf->led_status = i40e_led_get(hw); } else { - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); ret = i40e_led_get_phy(hw, &temp_status, @@ -2831,25 +2838,25 @@ static int i40e_set_phys_id(struct net_device *netdev, } return blink_freq; case ETHTOOL_ID_ON: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) i40e_led_set(hw, 0xf, false); else ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) i40e_led_set(hw, 0x0, false); else ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { + if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { i40e_led_set(hw, pf->led_status, false); } else { ret = i40e_led_set_phy(hw, false, pf->led_status, (pf->phy_led_val | I40E_PHY_LED_MODE_ORIG)); - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) i40e_aq_set_phy_debug(hw, 0, NULL); } break; @@ -2886,7 +2893,6 @@ static int __i40e_get_coalesce(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; ec->tx_max_coalesced_frames_irq = vsi->work_limit; - ec->rx_max_coalesced_frames_irq = vsi->work_limit; /* rx and tx usecs has per queue value. If user doesn't specify the * queue, return queue 0's value to represent. 
@@ -3020,7 +3026,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_pf *pf = vsi->back; int i; - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + if (ec->tx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; if (queue < 0) { @@ -3628,7 +3634,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); return -EOPNOTSUPP; @@ -3644,19 +3650,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, flow_pctypes); break; case TCP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, flow_pctypes); break; case UDP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, @@ -3666,7 +3675,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) break; case UDP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + pf->hw.caps)) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, @@ -4644,7 +4654,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, * main port cannot change them when in MFP mode as this would impact * any filters on the other ports. */ - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); return -EOPNOTSUPP; } @@ -4804,7 +4814,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, return -EINVAL; pf = vsi->back; - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return -EOPNOTSUPP; if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) @@ -5001,7 +5011,7 @@ static void i40e_get_channels(struct net_device *dev, ch->max_combined = i40e_max_channels(vsi); /* report info for other vector */ - ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; + ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0; ch->max_other = ch->other_count; /* Note: This code assumes DCB is disabled for now. */ @@ -5044,7 +5054,7 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; /* verify other_count has not changed */ - if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) + if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 
1 : 0)) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ @@ -5109,15 +5119,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) /** * i40e_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Returns 0 on * success. **/ -static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int i40e_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5125,13 +5133,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, int ret; u16 i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (!indir) + if (!rxfh->indir) return 0; - seed = key; + seed = rxfh->key; lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -5139,7 +5146,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (ret) goto out; for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) - indir[i] = (u32)(lut[i]); + rxfh->indir[i] = (u32)(lut[i]); out: kfree(lut); @@ -5150,15 +5157,15 @@ out: /** * i40e_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
**/ -static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int i40e_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5166,17 +5173,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, u8 *seed = NULL; u16 i; - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (key) { + if (rxfh->key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, GFP_KERNEL); if (!vsi->rss_hkey_user) return -ENOMEM; } - memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); + memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE); seed = vsi->rss_hkey_user; } if (!vsi->rss_lut_user) { @@ -5186,9 +5194,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - if (indir) + if (rxfh->indir) for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); else i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, vsi->rss_size); @@ -5215,11 +5223,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev) u32 i, j, ret_flags = 0; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - const struct i40e_priv_flags *priv_flags; + const struct i40e_priv_flags *priv_flag; - priv_flags = &i40e_gstrings_priv_flags[i]; + priv_flag = &i40e_gstrings_priv_flags[i]; - if (priv_flags->flag & pf->flags) + if (test_bit(priv_flag->bitno, pf->flags)) ret_flags |= BIT(i); } @@ -5227,11 +5235,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev) return ret_flags; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { - const struct i40e_priv_flags *priv_flags; + const struct i40e_priv_flags *priv_flag; - priv_flags = &i40e_gl_gstrings_priv_flags[j]; + priv_flag = &i40e_gl_gstrings_priv_flags[j]; - if (priv_flags->flag & pf->flags) + if (test_bit(priv_flag->bitno, pf->flags)) ret_flags |= BIT(i + j); } @@ -5245,8 +5253,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev) **/ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) { + DECLARE_BITMAP(changed_flags, I40E_PF_FLAGS_NBITS); + DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS); + DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS); struct i40e_netdev_priv *np = netdev_priv(dev); - u64 orig_flags, new_flags, changed_flags; enum i40e_admin_queue_err adq_err; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -5254,51 +5264,57 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) int status; u32 i, j; - orig_flags = READ_ONCE(pf->flags); - new_flags = orig_flags; + bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS); + bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS); for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - const struct i40e_priv_flags *priv_flags; - - priv_flags = &i40e_gstrings_priv_flags[i]; + const struct i40e_priv_flags *priv_flag; + bool new_val; - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); + priv_flag = &i40e_gstrings_priv_flags[i]; + new_val = (flags & BIT(i)) ? 
true : false;

 		/* If this is a read-only flag, it can't be changed */
-		if (priv_flags->read_only &&
-		    ((orig_flags ^ new_flags) & ~BIT(i)))
+		if (priv_flag->read_only &&
+		    test_bit(priv_flag->bitno, orig_flags) != new_val)
 			return -EOPNOTSUPP;
+
+		if (new_val)
+			set_bit(priv_flag->bitno, new_flags);
+		else
+			clear_bit(priv_flag->bitno, new_flags);
 	}

 	if (pf->hw.pf_id != 0)
 		goto flags_complete;

 	for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
-		const struct i40e_priv_flags *priv_flags;
+		const struct i40e_priv_flags *priv_flag;
+		bool new_val;

-		priv_flags = &i40e_gl_gstrings_priv_flags[j];
-
-		if (flags & BIT(i + j))
-			new_flags |= priv_flags->flag;
-		else
-			new_flags &= ~(priv_flags->flag);
+		priv_flag = &i40e_gl_gstrings_priv_flags[j];
+		new_val = (flags & BIT(i + j)) ? true : false;

 		/* If this is a read-only flag, it can't be changed */
-		if (priv_flags->read_only &&
-		    ((orig_flags ^ new_flags) & ~BIT(i)))
+		if (priv_flag->read_only &&
+		    test_bit(priv_flag->bitno, orig_flags) != new_val)
 			return -EOPNOTSUPP;
+
+		if (new_val)
+			set_bit(priv_flag->bitno, new_flags);
+		else
+			clear_bit(priv_flag->bitno, new_flags);
 	}

 flags_complete:
-	changed_flags = orig_flags ^ new_flags;
+	bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);

-	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+	if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
 		reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
-	if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
-	    I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+
+	if (test_bit(I40E_FLAG_VEB_STATS_ENA, changed_flags) ||
+	    test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) ||
+	    test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, changed_flags))
 		reset_needed = BIT(__I40E_PF_RESET_REQUESTED);

 	/* Before we finalize any flag changes, we need to perform some
@@ -5306,8 +5322,8 @@ flags_complete:
 	 */

 	/* ATR eviction is not supported on all devices */
-	if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
-	    !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
+	if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, new_flags) &&
+	    !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
 		return -EOPNOTSUPP;

 	/* If the driver detected FW LLDP was disabled on init, this flag could
@@ -5318,15 +5334,14 @@ flags_complete:
 	 * disable LLDP, however we _must_ not allow the user to enable/disable
 	 * LLDP with this flag on unsupported FW versions.
*/ - if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) { - dev_warn(&pf->pdev->dev, - "Device does not support changing FW LLDP\n"); - return -EOPNOTSUPP; - } + if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags) && + !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) { + dev_warn(&pf->pdev->dev, + "Device does not support changing FW LLDP\n"); + return -EOPNOTSUPP; } - if (changed_flags & I40E_FLAG_RS_FEC && + if (test_bit(I40E_FLAG_RS_FEC, changed_flags) && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B) { dev_warn(&pf->pdev->dev, @@ -5334,7 +5349,7 @@ flags_complete: return -EOPNOTSUPP; } - if (changed_flags & I40E_FLAG_BASE_R_FEC && + if (test_bit(I40E_FLAG_BASE_R_FEC, changed_flags) && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B && pf->hw.device_id != I40E_DEV_ID_KX_X722) { @@ -5349,17 +5364,17 @@ flags_complete: */ /* Flush current ATR settings if ATR was disabled */ - if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && - !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) { + if (test_bit(I40E_FLAG_FD_ATR_ENA, changed_flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, new_flags)) { set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } - if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { + if (test_bit(I40E_FLAG_TRUE_PROMISC_ENA, changed_flags)) { u16 sw_flags = 0, valid_flags = 0; int ret; - if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + if (!test_bit(I40E_FLAG_TRUE_PROMISC_ENA, new_flags)) sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, @@ -5374,17 +5389,17 @@ flags_complete: } } - if ((changed_flags & I40E_FLAG_RS_FEC) || - (changed_flags & I40E_FLAG_BASE_R_FEC)) { + if (test_bit(I40E_FLAG_RS_FEC, changed_flags) || + test_bit(I40E_FLAG_BASE_R_FEC, changed_flags)) { u8 fec_cfg = 0; - if (new_flags & I40E_FLAG_RS_FEC && - new_flags & I40E_FLAG_BASE_R_FEC) { + if (test_bit(I40E_FLAG_RS_FEC, new_flags) && + test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) { fec_cfg = I40E_AQ_SET_FEC_AUTO; - } else if (new_flags & I40E_FLAG_RS_FEC) { + } else if (test_bit(I40E_FLAG_RS_FEC, new_flags)) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS); - } else if (new_flags & I40E_FLAG_BASE_R_FEC) { + } else if (test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR); } @@ -5392,35 +5407,35 @@ flags_complete: dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } - if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && - (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) { + if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) && + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, orig_flags)) { dev_err(&pf->pdev->dev, "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"); return -EOPNOTSUPP; } - if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) && + if (test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, changed_flags) && pf->num_alloc_vfs) { dev_warn(&pf->pdev->dev, "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n"); return -EOPNOTSUPP; } - if ((changed_flags & I40E_FLAG_LEGACY_RX) && + if (test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) && I40E_2K_TOO_SMALL_WITH_PADDING) { dev_warn(&pf->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); return -EOPNOTSUPP; } - 
if ((changed_flags & new_flags & - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && - (new_flags & I40E_FLAG_MFP_ENABLED)) + if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) && + test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, new_flags) && + test_bit(I40E_FLAG_MFP_ENA, new_flags)) dev_warn(&pf->pdev->dev, "Turning on link-down-on-close flag may affect other partitions\n"); - if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, new_flags)) { #ifdef CONFIG_I40E_DCB i40e_dcb_sw_default_config(pf); #endif /* CONFIG_I40E_DCB */ @@ -5461,7 +5476,7 @@ flags_complete: * initialization or (b) while holding the RTNL lock, we don't need * anything fancy here. */ - pf->flags = new_flags; + bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS); /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset @@ -5491,7 +5506,7 @@ static int i40e_get_module_info(struct net_device *netdev, int status; /* Check if firmware supports reading module EEPROM. */ - if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); return -EINVAL; } @@ -5571,8 +5586,8 @@ static int i40e_get_module_info(struct net_device *netdev, modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; break; default: - netdev_err(vsi->netdev, "Module type unrecognized\n"); - return -EINVAL; + netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n"); + return -EOPNOTSUPP; } return 0; } @@ -5768,7 +5783,7 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { static const struct ethtool_ops i40e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1ab8dbe2d880..dc642efe1cfa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -371,7 +371,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); @@ -1209,13 +1209,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) nsd->fd_atr_status = true; else @@ -1485,7 +1485,7 @@ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, return pvid; is_any = (trusted || - !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING)); + !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags)); if ((vlan_filters && f->vlan == I40E_VLAN_ANY) || (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) 
|| @@ -1890,7 +1890,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) u8 *lut; int ret; - if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) + if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return 0; if (!vsi->rss_size) vsi->rss_size = min_t(int, pf->alloc_rss_size, @@ -2045,7 +2045,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, */ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; - else if (pf->flags & I40E_FLAG_MSIX_ENABLED) + else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) vsi->num_queue_pairs = pf->num_lan_msix; else vsi->num_queue_pairs = 1; @@ -2058,7 +2058,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, else num_tc_qps = vsi->alloc_queue_pairs; - if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ @@ -2077,7 +2077,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Do not allow use more TC queue pairs than MSI-X vectors exist */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); /* Setup queue offset/count for all TCs for given VSI */ @@ -2089,8 +2089,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, switch (vsi->type) { case I40E_VSI_MAIN: - if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED)) || + if ((!test_bit(I40E_FLAG_FD_SB_ENA, + pf->flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, + pf->flags)) || vsi->tc_config.enabled_tc != 1) { qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); @@ -2476,7 +2478,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB && - !(pf->flags & I40E_FLAG_MFP_ENABLED)) { + !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic @@ -2907,7 +2909,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) */ static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) { - if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) + if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048); return PAGE_SIZE < 8192 ? 
I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; @@ -3462,8 +3464,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ring->xsk_pool = i40e_xsk_pool(ring); /* some ATR related tx ring init */ - if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { - ring->atr_sample_rate = vsi->back->atr_sample_rate; + if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) { + ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; ring->atr_count = 0; } else { ring->atr_sample_rate = 0; @@ -3478,9 +3480,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; - tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED)); - tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) + tx_ctx.fd_ena = 1; + if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags)) + tx_ctx.timesync_ena = 1; /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; @@ -3663,7 +3667,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) } /* configure Rx buffer alignment */ - if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { + if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) { if (I40E_2K_TOO_SMALL_WITH_PADDING) { dev_info(&vsi->back->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); @@ -3761,7 +3765,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) u16 qoffset, qcount; int i, n; - if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { /* Reset the TC information */ for (i = 0; i < vsi->num_queue_pairs; i++) { rx_ring = vsi->rx_rings[i]; @@ -3828,7 +3832,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; struct hlist_node *node; - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return; /* Reset FDir counters as we're replaying all existing filters */ @@ -3966,10 +3970,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); @@ -4205,7 +4209,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) } /* disable each interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = vsi->base_vector; i < (vsi->num_q_vectors + vsi->base_vector); i++) wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); @@ -4231,7 +4235,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; int i; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { @@ -4252,7 +4256,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf) wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); - if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { free_irq(pf->msix_entries[0].vector, pf); 
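[Annotation] The Tx-ring context hunk above shows a knock-on effect of the bitmap conversion: "tx_ctx.fd_ena = !!(flags & (SB | ATR))" can no longer be written as a single mask test, so it becomes two explicit test_bit() calls feeding an if. A short standalone sketch of the equivalence, using a single-word bitmap and hypothetical flag names rather than the driver's:

/* Standalone sketch: a multi-bit mask test against a u64 becomes two
 * explicit bit tests once the flags live in an unsigned long array. */
#include <stdbool.h>
#include <stdio.h>

enum pf_flag { FLAG_FD_SB_ENA, FLAG_FD_ATR_ENA, PF_FLAGS_NBITS };

static bool test_flag(const unsigned long *map, int nr)
{
	return map[0] >> nr & 1;	/* one word is enough for this demo */
}

int main(void)
{
	unsigned long flags[1] = { 1UL << FLAG_FD_ATR_ENA };
	unsigned int fd_ena = 0;

	/* old form: fd_ena = !!(flags & (FLAG_FD_SB | FLAG_FD_ATR)); */
	if (test_flag(flags, FLAG_FD_SB_ENA) ||
	    test_flag(flags, FLAG_FD_ATR_ENA))
		fd_ena = 1;

	printf("fd_ena = %u\n", fd_ena);	/* prints fd_ena = 1 */
	return 0;
}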
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); } @@ -4287,7 +4291,7 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); @@ -4480,7 +4484,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) i += tx_ring->count; tx_ring->next_to_clean = i; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; @@ -4593,9 +4597,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) err = i40e_vsi_request_irq_msix(vsi, basename); - else if (pf->flags & I40E_FLAG_MSI_ENABLED) + else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else @@ -4627,7 +4631,7 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { @@ -4967,7 +4971,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) u32 val, qp; int i; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { if (!vsi->q_vectors) return; @@ -5129,16 +5133,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; - } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { + } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { pci_disable_msi(pf->pdev); } - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); } /** @@ -5478,11 +5483,11 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; /* If neither MQPRIO nor DCB is enabled, then always use single TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return 1; /* SFP mode will be enabled for all TCs on port */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) return i40e_dcb_get_num_tc(dcbcfg); /* MFP mode return count of enabled TCs for this PF */ @@ -5512,11 +5517,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ @@ -5605,7 +5610,7 @@ static int 
i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, /* There is no need to reset BW when mqprio mode is on. */ if (i40e_is_tc_mqprio_enabled(pf)) return 0; - if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5858,7 +5863,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) } vsi->reconfig_rss = false; } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; @@ -6271,7 +6276,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, if (ch->type == I40E_VSI_VMDQ2) ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = @@ -6576,8 +6581,8 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, * VSI to be added switch to VEB mode. */ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); if (vsi->type == I40E_VSI_MAIN) { if (i40e_is_tc_mqprio_enabled(pf)) @@ -6988,9 +6993,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) if (need_reconfig) { /* Enable DCB tagging only when more than one TC */ if (new_numtc > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -7080,7 +7085,7 @@ out: set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } /* registers are set, lets apply */ - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) ret = i40e_hw_set_dcb_config(pf, new_cfg); } @@ -7101,7 +7106,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err; - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { + if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) { /* Update the local cached instance with TC0 ETS */ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; @@ -7162,12 +7167,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Do not enable DCB for SW1 and SW2 images even if the FW is capable * Also do not enable DCBx if FW LLDP agent is disabled */ - if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { + if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); err = -EOPNOTSUPP; goto out; } - if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) { dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); err = i40e_dcb_sw_default_config(pf); if (err) { @@ -7178,8 +7183,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; /* at init capable but disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); goto out; } err 
= i40e_init_dcb(hw, true); @@ -7194,20 +7199,20 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Enable DCB tagging only when more than one TC * or explicitly disable if only one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); - pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; + set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %pe aq_err %s\n", @@ -7367,7 +7372,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); @@ -7471,7 +7476,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) * and its speed values are OK, no need for a flap * if non_zero_phy_type was set, still need to force up */ - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return 0; @@ -7487,7 +7492,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { + if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { if (is_up) config.abilities |= I40E_AQ_PHY_ENABLE_LINK; else @@ -7537,8 +7542,8 @@ int i40e_up(struct i40e_vsi *vsi) int err; if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, true); err = i40e_vsi_configure(vsi); @@ -7566,8 +7571,8 @@ void i40e_down(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && - (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || - vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || + test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); @@ -7973,7 +7978,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) struct i40e_fwd_adapter *fwd; int avail_macvlan, ret; - if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { + if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); return ERR_PTR(-EINVAL); } @@ -8168,23 +8173,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; if (!hw) { - pf->flags &= ~I40E_FLAG_TC_MQPRIO; + clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); memcpy(&vsi->mqprio_qopt, 
mqprio_qopt, sizeof(*mqprio_qopt));
 		goto config_tc;
 	}

 	/* Check if MFP enabled */
-	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
 		netdev_info(netdev,
 			    "Configuring TC not supported in MFP mode\n");
 		return ret;
 	}
 	switch (mode) {
 	case TC_MQPRIO_MODE_DCB:
-		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+		clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);

 		/* Check if DCB enabled to continue */
-		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+		if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
 			netdev_info(netdev,
 				    "DCB is not enabled for adapter\n");
 			return ret;
@@ -8198,20 +8203,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
 		}
 		break;
 	case TC_MQPRIO_MODE_CHANNEL:
-		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+		if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
 			netdev_info(netdev,
 				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
 			return ret;
 		}
-		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
 			return ret;
 		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
 		if (ret)
 			return ret;
 		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
 		       sizeof(*mqprio_qopt));
-		pf->flags |= I40E_FLAG_TC_MQPRIO;
-		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+		set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
+		clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
 		break;
 	default:
 		return -EINVAL;
@@ -8795,11 +8800,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
 		return -EINVAL;
 	}

-	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+	if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
 		dev_err(&vsi->back->pdev->dev,
 			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
-		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+		clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
+		set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
 	}

 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
@@ -8895,11 +8900,11 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
 	pf->num_cloud_filters--;
 	if (!pf->num_cloud_filters)
-		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
-		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
-			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
-			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+		if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+		    !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+			set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+			clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+			clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
 		}
 	return 0;
 }
@@ -9200,11 +9205,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf)
 	}
 	pf->num_cloud_filters = 0;

-	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
-	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
-		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
-		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+	if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+	    !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+		set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+		clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+		clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
 	}
 }

@@ -9292,7 +9297,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
 		i40e_prep_for_reset(pf);
 		i40e_reset_and_rebuild(pf, true, lock_acquired);
 		dev_info(&pf->pdev->dev,
-			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+			 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); @@ -9407,12 +9412,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) && - !(pf->flags & I40E_FLAG_DCB_CAPABLE)) + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) /* let firmware decide if the DCB should be disabled */ - pf->flags |= I40E_FLAG_DCB_CAPABLE; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) return ret; /* Ignore if event is not for Nearest Bridge */ @@ -9448,7 +9453,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", @@ -9476,9 +9481,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) - pf->flags |= I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ @@ -9610,7 +9615,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) static void i40e_reenable_fdir_sb(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } @@ -9631,7 +9636,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); } @@ -9946,7 +9951,7 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->vf) i40e_vc_notify_link_state(pf); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i40e_ptp_set_increment(pf); #ifdef CONFIG_I40E_DCB if (new_link == old_link) @@ -9963,13 +9968,13 @@ static void i40e_link_event(struct i40e_pf *pf) memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); err = i40e_dcb_sw_default_config(pf); if (err) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } else { pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - pf->flags |= I40E_FLAG_DCB_CAPABLE; - pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); } } #endif /* CONFIG_I40E_DCB */ @@ -9994,7 +9999,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) || test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); @@ -10005,7 +10010,7 @@ 
static void i40e_watchdog_subtask(struct i40e_pf *pf) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); - if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { + if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) { /* Update the stats for the active switching components */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) @@ -10094,7 +10099,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf, if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP)) && - (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { + (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); dev_err(&pf->pdev->dev, @@ -10122,7 +10127,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) return; /* check for error indications */ - val = rd32(&pf->hw, pf->hw.aq.arq.len); + val = rd32(&pf->hw, I40E_PF_ARQLEN); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) @@ -10141,9 +10146,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.arq.len, val); + wr32(&pf->hw, I40E_PF_ARQLEN, val); - val = rd32(&pf->hw, pf->hw.aq.asq.len); + val = rd32(&pf->hw, I40E_PF_ATQLEN); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) @@ -10161,7 +10166,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) - wr32(&pf->hw, pf->hw.aq.asq.len, val); + wr32(&pf->hw, I40E_PF_ATQLEN, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); @@ -10221,9 +10226,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) opcode); break; } - } while (i++ < pf->adminq_work_limit); + } while (i++ < I40E_AQ_WORK_LIMIT); - if (i < pf->adminq_work_limit) + if (i < I40E_AQ_WORK_LIMIT) clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ @@ -10400,7 +10405,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; @@ -10545,7 +10550,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) return; /* find existing VSI and see if it needs configuring */ @@ -10557,8 +10562,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); return; } } @@ -10936,14 +10941,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_aq_set_dcb_parameters(hw, false, NULL); dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); } else { i40e_aq_set_dcb_parameters(hw, true, NULL); ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", 
ret); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); /* Continue without DCB enabled */ } } @@ -11064,7 +11069,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -11074,7 +11079,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { ret = i40e_setup_misc_vector(pf); if (ret) goto end_unlock; @@ -11349,7 +11354,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; @@ -11667,7 +11672,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; WRITE_ONCE(vsi->tx_rings[i], ring++); @@ -11684,7 +11689,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) + if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; @@ -11748,7 +11753,7 @@ static int i40e_init_msix(struct i40e_pf *pf) int v_actual; int iwarp_requested = 0; - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return -ENODEV; /* The number of vectors we'll request will be comprised of: @@ -11787,7 +11792,7 @@ static int i40e_init_msix(struct i40e_pf *pf) vectors_left -= pf->num_lan_msix; /* reserve one vector for sideband flow director */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (vectors_left) { pf->num_fdsb_msix = 1; v_budget++; @@ -11798,7 +11803,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* can we reserve enough for iWARP? 
*/ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { iwarp_requested = pf->num_iwarp_msix; if (!vectors_left) @@ -11810,7 +11815,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } /* any vectors left over go for VMDq support */ - if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) { if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; @@ -11867,7 +11872,7 @@ static int i40e_init_msix(struct i40e_pf *pf) v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual < I40E_MIN_MSIX) { - pf->flags &= ~I40E_FLAG_MSIX_ENABLED; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); @@ -11905,7 +11910,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_msix = 1; break; case 3: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; } else { @@ -11913,7 +11918,7 @@ static int i40e_init_msix(struct i40e_pf *pf) } break; default: - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->num_iwarp_msix = min_t(int, (vec / 3), iwarp_requested); pf->num_vmdq_vsis = min_t(int, (vec / 3), @@ -11922,7 +11927,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_vsis = min_t(int, (vec / 2), I40E_DEFAULT_NUM_VMDQ_VSI); } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { pf->num_fdsb_msix = 1; vec--; } @@ -11934,22 +11939,20 @@ static int i40e_init_msix(struct i40e_pf *pf) } } - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (pf->num_fdsb_msix == 0)) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && - (pf->num_vmdq_msix == 0)) { + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); } - if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && - (pf->num_iwarp_msix == 0)) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && + pf->num_iwarp_msix == 0) { dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); } i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", @@ -12003,7 +12006,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) int err, v_idx, num_q_vectors; /* if not MSIX, give the one vector only to the LAN VSI */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_q_vectors = vsi->num_q_vectors; else if (vsi == pf->vsi[pf->lan_vsi]) num_q_vectors = 1; @@ -12034,38 +12037,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) int vectors = 0; ssize_t size; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { vectors = i40e_init_msix(pf); if (vectors < 0) { - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_RSS_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - 
I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && - (pf->flags & I40E_FLAG_MSI_ENABLED)) { + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && + test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); - pf->flags &= ~I40E_FLAG_MSI_ENABLED; + clear_bit(I40E_FLAG_MSI_ENA, pf->flags); } vectors = 1; /* one MSI or Legacy vector */ } - if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) && + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ @@ -12098,7 +12102,8 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) * scheme. We need to re-enabled them here in order to attempt to * re-acquire the MSI or MSI-X vectors */ - pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); err = i40e_init_interrupt_scheme(pf); if (err) @@ -12120,7 +12125,7 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) if (err) goto err_unwind; - if (pf->flags & I40E_FLAG_IWARP_ENABLED) + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) i40e_client_update_msix_info(pf); return 0; @@ -12148,7 +12153,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) { int err; - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { err = i40e_setup_misc_vector(pf); if (err) { @@ -12158,7 +12163,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) return err; } } else { - u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; + u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 
0 : IRQF_SHARED; err = request_irq(pf->pdev->irq, i40e_intr, flags, pf->int_name, pf); @@ -12362,7 +12367,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); @@ -12381,7 +12386,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); @@ -12484,7 +12489,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int new_rss_size; - if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) return 0; queue_count = min_t(int, queue_count, num_online_cpus()); @@ -12717,9 +12722,9 @@ static int i40e_sw_init(struct i40e_pf *pf) u16 pow; /* Set default capability flags */ - pf->flags = I40E_FLAG_RX_CSUM_ENABLED | - I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED; + bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS); + set_bit(I40E_FLAG_MSI_ENA, pf->flags); + set_bit(I40E_FLAG_MSIX_ENA, pf->flags); /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; @@ -12739,14 +12744,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_size_max = min_t(int, pf->rss_size_max, pow); if (pf->hw.func_caps.rss) { - pf->flags |= I40E_FLAG_RSS_ENABLED; + set_bit(I40E_FLAG_RSS_ENA, pf->flags); pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { - pf->flags |= I40E_FLAG_MFP_ENABLED; + set_bit(I40E_FLAG_MFP_ENA, pf->flags); dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, @@ -12763,84 +12768,31 @@ static int i40e_sw_init(struct i40e_pf *pf) if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - if (pf->flags & I40E_FLAG_MFP_ENABLED && + set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } - if (pf->hw.mac.type == I40E_MAC_X722) { - pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | - I40E_HW_128_QP_RSS_CAPABLE | - I40E_HW_ATR_EVICT_CAPABLE | - I40E_HW_WB_ON_ITR_CAPABLE | - I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_HW_NO_PCI_LINK_CHECK | - I40E_HW_USE_SET_LLDP_MIB | - I40E_HW_GENEVE_OFFLOAD_CAPABLE | - I40E_HW_PTP_L4_CAPABLE | - I40E_HW_WOL_MC_MAGIC_PKT_WAKE | - I40E_HW_OUTER_UDP_CSUM_CAPABLE); - -#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 - if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != - I40E_FDEVICT_PCTYPE_DEFAULT) { - dev_warn(&pf->pdev->dev, - "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); - pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; - } - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && 
- (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; - } - /* Enable HW ATR eviction if possible */ - if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) - pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; - - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4))) { - pf->hw_features |= I40E_HW_RESTART_AUTONEG; - /* No DCB support for FW < v4.33 */ - pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; - } - - /* Disable FW LLDP if FW < v4.3 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4))) - pf->hw_features |= I40E_HW_STOP_FW_LLDP; - - /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if ((pf->hw.mac.type == I40E_MAC_XL710) && - (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || - (pf->hw.aq.fw_maj_ver >= 5))) - pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; - - /* Enable PTP L4 if FW > v6.0 */ - if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.aq.fw_maj_ver >= 6) - pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; + if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) + set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags); if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; - pf->flags |= I40E_FLAG_VMDQ_ENABLED; + set_bit(I40E_FLAG_VMDQ_ENA, pf->flags); pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { - pf->flags |= I40E_FLAG_IWARP_ENABLED; + set_bit(I40E_FLAG_IWARP_ENA, pf->flags); /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } @@ -12850,25 +12802,23 @@ static int i40e_sw_init(struct i40e_pf *pf) * if NPAR is functioning so unset this hw flag in this case. 
*/ if (pf->hw.mac.type == I40E_MAC_XL710 && - pf->hw.func_caps.npar_enable && - (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) - pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; + pf->hw.func_caps.npar_enable) + clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; - pf->flags |= I40E_FLAG_SRIOV_ENABLED; + set_bit(I40E_FLAG_SRIOV_ENA, pf->flags); pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ - pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ - pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) @@ -12887,8 +12837,8 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Link down on close must be on when total port shutdown * is enabled for a given port */ - pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | - I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); + set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); + set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); dev_info(&pf->pdev->dev, "total-port-shutdown was enabled, link-down-on-close is forced on\n"); } @@ -12914,31 +12864,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) */ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ - if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) need_reset = true; /* enable FD_SB only if there is MSI-X vector and no cloud * filters exist */ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } } else { /* turn off filters, mark for reset and clear SW filter list */ - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
*/ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } @@ -13087,7 +13037,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) + if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); @@ -13116,7 +13066,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct i40e_pf *pf = np->vsi->back; int err = 0; - if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) return -EOPNOTSUPP; if (vid) { @@ -13216,9 +13166,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, veb->bridge_mode = mode; /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ if (mode == BRIDGE_MODE_VEB) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); else - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); break; } @@ -13552,7 +13502,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) struct i40e_hw *hw = &pf->hw; /* All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); else i40e_irq_dynamic_enable_icr0(pf); @@ -13577,7 +13527,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) * * All rings in a qp belong to the same qvector. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); @@ -13762,7 +13712,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_RXCSUM | 0; - if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) + if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; @@ -13798,7 +13748,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; netdev->hw_features |= hw_features | NETIF_F_LOOPBACK; @@ -13989,7 +13939,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) * negative logic - if it's set, we need to fiddle with * the VSI to disable source pruning. 
*/ - if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { + if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) { memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; @@ -14011,7 +13961,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } /* MFP mode setup queue map and update VSI */ - if ((pf->flags & I40E_FLAG_MFP_ENABLED) && + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; @@ -14059,7 +14009,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); @@ -14107,7 +14057,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } - if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= @@ -14323,7 +14273,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ - if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, @@ -14490,9 +14440,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, * already enabled, in which case we can't force VEPA * mode. 
*/ - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { veb->bridge_mode = BRIDGE_MODE_VEPA; - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } i40e_config_bridge_mode(veb); } @@ -14588,12 +14538,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } - if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && - (vsi->type == I40E_VSI_VMDQ2)) { + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && + vsi->type == I40E_VSI_VMDQ2) { ret = i40e_vsi_config_rss(vsi); + if (ret) + goto err_config; } return vsi; +err_config: + i40e_vsi_clear_rings(vsi); err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: @@ -14831,7 +14785,7 @@ void i40e_veb_release(struct i40e_veb *veb) static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); + bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); int ret; ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, @@ -15020,12 +14974,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, * the PF's VSI */ pf->mac_seid = uplink_seid; - pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", - pf->pf_seid, pf->main_vsi_seid); + downlink_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: @@ -15131,7 +15084,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui */ if ((pf->hw.pf_id == 0) && - !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; pf->last_sw_conf_flags = flags; } @@ -15198,16 +15151,12 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ - if ((pf->flags & I40E_FLAG_RSS_ENABLED)) + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_link_event(pf); - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? 
true : false); - i40e_ptp_init(pf); if (!lock_acquired) @@ -15240,42 +15189,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; - } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_CAPABLE))) { + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); + } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && + !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && + !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_IWARP_ENABLED | - I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_ATR_ENABLED | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_RSS_ENA, pf->flags); + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); + clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); } else { /* Not enough queues for all TCs */ - if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && - (queues_left < I40E_MAX_TRAFFIC_CLASS)) { - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | - I40E_FLAG_DCB_ENABLED); + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) && + queues_left < I40E_MAX_TRAFFIC_CLASS) { + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } @@ -15288,24 +15237,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; } - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); + set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); } } - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } - if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); @@ -15316,7 +15265,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, - !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); @@ -15340,7 +15289,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ - if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) || + test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ @@ -15372,21 +15322,21 @@ static void i40e_print_features(struct i40e_pf *pf) i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs); - if (pf->flags & I40E_FLAG_RSS_ENABLED) + if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " RSS"); - if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); } - if (pf->flags & I40E_FLAG_DCB_CAPABLE) + if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " DCB"); i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); i += scnprintf(&buf[i], REMAIN(i), " Geneve"); - if (pf->flags & I40E_FLAG_PTP) + if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " PTP"); - if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) i += scnprintf(&buf[i], REMAIN(i), " VEB"); else i += scnprintf(&buf[i], REMAIN(i), " VEPA"); @@ -15417,22 +15367,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) * @fec_cfg: FEC option to set in flags * @flags: ptr to flags in which we set FEC option **/ -void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) +void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags) { - if (fec_cfg & I40E_AQ_SET_FEC_AUTO) - *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) { + set_bit(I40E_FLAG_RS_FEC, flags); + set_bit(I40E_FLAG_BASE_R_FEC, flags); + } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { - *flags |= I40E_FLAG_RS_FEC; - *flags &= ~I40E_FLAG_BASE_R_FEC; + set_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { - *flags |= I40E_FLAG_BASE_R_FEC; - *flags &= ~I40E_FLAG_RS_FEC; + set_bit(I40E_FLAG_BASE_R_FEC, flags); + 
clear_bit(I40E_FLAG_RS_FEC, flags); + } + if (fec_cfg == 0) { + clear_bit(I40E_FLAG_RS_FEC, flags); + clear_bit(I40E_FLAG_BASE_R_FEC, flags); } - if (fec_cfg == 0) - *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** @@ -15676,7 +15630,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* CONFIG_I40E_DCB */ struct i40e_pf *pf; struct i40e_hw *hw; - static u16 pfs_found; u16 wol_nvm_bits; char nvm_ver[32]; u16 link_status; @@ -15754,7 +15707,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; - pf->instance = pfs_found; /* Select something other than the 802.1ad ethertype for the * switch to use internally and drop on ingress. @@ -15816,7 +15768,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; - pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -15858,15 +15809,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, hw->subsystem_device_id); - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) + if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR, + I40E_FW_MINOR_VERSION(hw) + 1)) dev_dbg(&pdev->dev, "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); - else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) + else if (i40e_is_aq_api_ver_lt(hw, 1, 4)) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n", hw->aq.api_maj_ver, @@ -15913,7 +15864,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { + if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, false, NULL); } @@ -15930,7 +15881,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) - pf->hw_features |= I40E_HW_PORT_ID_VALID; + set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps); i40e_ptp_alloc_pins(pf); pci_set_drvdata(pdev, pf); @@ -15940,10 +15891,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ? - (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : - (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); + (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) : + (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)); dev_info(&pdev->dev, - (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? + test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? 
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); @@ -15953,7 +15904,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); + clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(I40E_FLAG_DCB_ENA, pf->flags); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ @@ -16021,11 +15973,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV /* prep for VF support */ - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && + test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); } #endif err = i40e_setup_pf_switch(pf, false, false); @@ -16066,7 +16018,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { + if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -16086,7 +16038,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pdev->dev, @@ -16099,8 +16051,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_PCI_IOV /* prep for VF support */ - if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED) && + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && + test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); @@ -16120,7 +16072,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #endif /* CONFIG_PCI_IOV */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, pf->num_iwarp_msix, I40E_IWARP_IRQ_PILE_ID); @@ -16128,7 +16080,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "failed to get tracking for %d vectors for IWARP err=%d\n", pf->num_iwarp_msix, pf->iwarp_base_vector); - pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); } } @@ -16142,7 +16094,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { err = i40e_lan_add_device(pf); if (err) dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", @@ -16155,7 +16107,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and will report PCI Gen 1 x 1 by default so don't bother * checking them. 
*/ - if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { + if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; @@ -16209,7 +16161,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* set the FEC config due to the board capabilities */ - i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); + i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags); /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); @@ -16236,10 +16188,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->main_vsi_seid); if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || - (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) - pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; + (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) + set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps); if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) - pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; + set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps); /* print a string summarizing features */ i40e_print_features(pf); @@ -16308,10 +16260,10 @@ static void i40e_remove(struct pci_dev *pdev) usleep_range(1000, 2000); set_bit(__I40E_IN_REMOVE, pf->state); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) { set_bit(__I40E_VF_RESETS_DISABLED, pf->state); i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); } /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); @@ -16366,7 +16318,7 @@ static void i40e_remove(struct pci_dev *pdev) i40e_cloud_filter_exit(pf); /* remove attached clients */ - if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { ret_code = i40e_lan_del_device(pf); if (ret_code) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", @@ -16385,7 +16337,7 @@ static void i40e_remove(struct pci_dev *pdev) unmap: /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) free_irq(pf->pdev->irq, pf); /* shutdown the adminq */ @@ -16601,7 +16553,8 @@ static void i40e_shutdown(struct pci_dev *pdev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) + if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && + pf->wol_en) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf); @@ -16613,7 +16566,7 @@ static void i40e_shutdown(struct pci_dev *pdev) /* Free MSI/legacy interrupt 0 when in recovery mode. 
*/ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && - !(pf->flags & I40E_FLAG_MSIX_ENABLED)) + !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) free_irq(pf->pdev->irq, pf); /* Since we're going to destroy queues during the @@ -16654,7 +16607,8 @@ static int __maybe_unused i40e_suspend(struct device *dev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) + if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && + pf->wol_en) i40e_enable_mc_magic_wake(pf); /* Since we're going to destroy queues during the diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 77cdbfc19d47..62eb34871094 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -291,7 +291,7 @@ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, static int __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) return i40e_read_nvm_word_aq(hw, offset, data); return i40e_read_nvm_word_srctl(hw, offset, data); @@ -310,14 +310,14 @@ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, { int ret_code = 0; - if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps)) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = __i40e_read_nvm_word(hw, offset, data); - if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps)) i40e_release_nvm(hw); return ret_code; @@ -499,7 +499,7 @@ static int __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); @@ -521,7 +521,7 @@ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, { int ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (!ret_code) { ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 001162042050..af4269330581 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -501,4 +501,74 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, /* i40e_ddp */ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); +/* Firmware and AdminQ version check helpers */ + +/** + * i40e_is_aq_api_ver_ge + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current HW API version is greater/equal than provided. + **/ +static inline bool i40e_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.api_maj_ver > maj || + (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min)); +} + +/** + * i40e_is_aq_api_ver_lt + * @hw: pointer to i40e_hw structure + * @maj: API major value to compare + * @min: API minor value to compare + * + * Assert whether current HW API version is less than provided. 
+ **/ +static inline bool i40e_is_aq_api_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) +{ + return !i40e_is_aq_api_ver_ge(hw, maj, min); +} + +/** + * i40e_is_fw_ver_ge + * @hw: pointer to i40e_hw structure + * @maj: firmware major value to compare + * @min: firmware minor value to compare + * + * Assert whether current firmware version is greater/equal than provided. + **/ +static inline bool i40e_is_fw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.fw_maj_ver > maj || + (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver >= min)); +} + +/** + * i40e_is_fw_ver_lt + * @hw: pointer to i40e_hw structure + * @maj: firmware major value to compare + * @min: firmware minor value to compare + * + * Assert whether current firmware version is less than provided. + **/ +static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) +{ + return !i40e_is_fw_ver_ge(hw, maj, min); +} + +/** + * i40e_is_fw_ver_eq + * @hw: pointer to i40e_hw structure + * @maj: firmware major value to compare + * @min: firmware minor value to compare + * + * Assert whether current firmware version is equal to provided. + **/ +static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min) +{ + return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min); +} + #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 20b77398f060..1cf993a79438 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -35,7 +35,7 @@ enum i40e_ptp_pin { GPIO_4 }; -enum i40e_can_set_pins_t { +enum i40e_can_set_pins { CANT_DO_PINS = -1, CAN_SET_PINS, CAN_DO_PINS @@ -193,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) * return CAN_DO_PINS if pins can be manipulated within a NIC or * return CANT_DO_PINS otherwise. **/ -static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) +static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf) { if (!i40e_is_ptp_pin_dev(&pf->hw)) { dev_warn(&pf->pdev->dev, @@ -680,7 +680,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) * configured. We don't want to spuriously warn about Rx timestamp * hangs if we don't care about the timestamps. */ - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx) return; spin_lock_bh(&pf->ptp_rx_lock); @@ -733,7 +733,7 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) { struct sk_buff *skb; - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx) return; /* Nothing to do if we're not already waiting for a timestamp */ @@ -771,7 +771,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) u32 hi, lo; u64 ns; - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx) return; /* don't attempt to timestamp if we don't have an skb */ @@ -818,7 +818,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) /* Since we cannot turn off the Rx timestamp logic if the device is * doing Tx timestamping, check if Rx timestamping is configured.
*/ - if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx) return; hw = &pf->hw; @@ -924,7 +924,7 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return -EOPNOTSUPP; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? @@ -1071,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) static int i40e_ptp_set_pins(struct i40e_pf *pf, struct i40e_ptp_pins_settings *pins) { - enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); + enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf); int i = 0; if (pin_caps == CANT_DO_PINS) @@ -1211,7 +1211,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) + if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) return -ERANGE; pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | @@ -1225,7 +1225,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) + if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) return -ERANGE; fallthrough; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -1234,7 +1234,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V2; - if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) { + if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) { tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; } else { @@ -1308,7 +1308,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return -EOPNOTSUPP; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) @@ -1426,7 +1426,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) void i40e_ptp_save_hw_time(struct i40e_pf *pf) { /* don't try to access the PTP clock if it's not enabled */ - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return; i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL); @@ -1483,7 +1483,7 @@ void i40e_ptp_init(struct i40e_pf *pf) pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> I40E_PRTTSYN_CTL0_PF_ID_SHIFT; if (hw->pf_id != pf_id) { - pf->flags &= ~I40E_FLAG_PTP; + clear_bit(I40E_FLAG_PTP_ENA, pf->flags); dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", __func__, netdev->name); @@ -1504,7 +1504,7 @@ void i40e_ptp_init(struct i40e_pf *pf) if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "PHC enabled\n"); - pf->flags |= I40E_FLAG_PTP; + set_bit(I40E_FLAG_PTP_ENA, pf->flags); /* Ensure the clocks are running. 
*/ regval = rd32(hw, I40E_PRTTSYN_CTL0); @@ -1539,7 +1539,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; u32 regval; - pf->flags &= ~I40E_FLAG_PTP; + clear_bit(I40E_FLAG_PTP_ENA, pf->flags); pf->ptp_tx = false; pf->ptp_rx = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index f6671ac79735..14ab642cafdb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -863,16 +863,6 @@ #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ #define I40E_PFPM_WUFC_MAG_SHIFT 1 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VFQF_HLUT_MAX_INDEX 15 @@ -899,6 +889,7 @@ #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 /* Redefined for X722 family */ #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ #endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index dd410b15000f..b82df5bdfac0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -464,7 +464,7 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi, &pf->fd_tcp6_filter_cnt); if (add) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); @@ -734,7 +734,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, * FD ATR/SB and then re-enable it when there is room. 
*/ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) if (I40E_DEBUG_FD & pf->hw.debug_mask) @@ -1071,7 +1071,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, if (q_vector->arm_wb_state) return; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ @@ -1095,7 +1095,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, **/ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | @@ -2699,7 +2699,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, u32 intval; /* If we don't have MSIX, then we only need to re-enable icr0 */ - if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { + if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { i40e_irq_dynamic_enable_icr0(vsi->back); return; } @@ -2888,7 +2888,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i; /* make sure ATR is enabled */ - if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) return; if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) @@ -2933,7 +2933,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { + if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ @@ -2995,7 +2995,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) + if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -3053,7 +3053,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } - if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) goto out; /* Insert 802.1p priority into VLAN header */ @@ -3229,7 +3229,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * we are not already transmitting a packet to be timestamped */ pf = i40e_netdev_to_pf(tx_ring->netdev); - if (!(pf->flags & I40E_FLAG_PTP)) + if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) return 0; if (pf->ptp_tx && diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 421fe5675584..abf15067eb5d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -58,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. 
*/ -enum i40e_dyn_idx_t { +enum i40e_dyn_idx { I40E_IDX_ITR0 = 0, I40E_IDX_ITR1 = 1, I40E_IDX_ITR2 = 2, @@ -92,8 +92,8 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ - (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 @@ -306,7 +306,7 @@ struct i40e_rx_queue_stats { u64 page_busy_count; }; -enum i40e_ring_state_t { +enum i40e_ring_state { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, __I40E_RING_STATE_NBITS /* must be last */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index f95bc2a4a838..d9031499697e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -64,9 +64,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, I40E_MAC_XL710, - I40E_MAC_VF, I40E_MAC_X722, - I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -272,9 +270,7 @@ struct i40e_mac_info { enum i40e_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; u8 port_addr[ETH_ALEN]; - u16 max_fcoeq; }; enum i40e_aq_resources_ids { @@ -482,6 +478,36 @@ struct i40e_dcbx_config { struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; +enum i40e_hw_flags { + I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, + I40E_HW_CAP_802_1AD, + I40E_HW_CAP_AQ_PHY_ACCESS, + I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, + I40E_HW_CAP_FW_LLDP_STOPPABLE, + I40E_HW_CAP_FW_LLDP_PERSISTENT, + I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, + I40E_HW_CAP_X722_FEC_REQUEST, + I40E_HW_CAP_RSS_AQ, + I40E_HW_CAP_128_QP_RSS, + I40E_HW_CAP_ATR_EVICT, + I40E_HW_CAP_WB_ON_ITR, + I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, + I40E_HW_CAP_NO_PCI_LINK_CHECK, + I40E_HW_CAP_100M_SGMII, + I40E_HW_CAP_NO_DCB_SUPPORT, + I40E_HW_CAP_USE_SET_LLDP_MIB, + I40E_HW_CAP_GENEVE_OFFLOAD, + I40E_HW_CAP_PTP_L4, + I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, + I40E_HW_CAP_CRT_RETIMER, + I40E_HW_CAP_OUTER_UDP_CSUM, + I40E_HW_CAP_PHY_CONTROLS_LEDS, + I40E_HW_CAP_STOP_FW_LLDP, + I40E_HW_CAP_PORT_ID_VALID, + I40E_HW_CAP_RESTART_AUTONEG, + I40E_HW_CAPS_NBITS, +}; + /* Port hardware description */ struct i40e_hw { u8 __iomem *hw_addr; @@ -546,16 +572,7 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ -#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) -#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) -#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) -#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6) -#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) -#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8) - u64 flags; + DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS); /* Used in set switch config AQ command */ u16 switch_tag; @@ -567,12 +584,6 @@ struct i40e_hw { char err_str[16]; }; -static inline bool i40e_is_vf(struct i40e_hw *hw) -{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - struct i40e_driver_version { u8 major_version; u8 minor_version; diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3f99eb198245..37cca484abb8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1808,7 +1808,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); pf->num_alloc_vfs = 0; goto err_iov; } @@ -1919,8 +1919,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) } if (num_vfs) { - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { + set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } ret = i40e_pci_sriov_enable(pdev, num_vfs); @@ -1929,7 +1929,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -2137,14 +2137,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && + if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -2153,12 +2153,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; - if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && + if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); @@ -2168,7 +2168,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; } - if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { + if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; @@ -4843,7 +4843,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; } - if (pf->flags & I40E_FLAG_MFP_ENABLED) { + if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); ret = -EINVAL; goto out; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 
63b45c61cc4a..db8188c7ac4b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -313,7 +313,8 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) -#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15) +#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15) +#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16) #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) @@ -415,6 +416,7 @@ struct iavf_adapter { struct iavf_vsi vsi; u32 aq_wait_count; /* RSS stuff */ + enum virtchnl_rss_algorithm hfunc; u64 hena; u16 rss_key_size; u16 rss_lut_size; @@ -540,6 +542,7 @@ void iavf_get_hena(struct iavf_adapter *adapter); void iavf_set_hena(struct iavf_adapter *adapter); void iavf_set_rss_key(struct iavf_adapter *adapter); void iavf_set_rss_lut(struct iavf_adapter *adapter); +void iavf_set_rss_hfunc(struct iavf_adapter *adapter); void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); void iavf_virtchnl_completion(struct iavf_adapter *adapter, diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index 9ffbd24d83cb..82fcd18ad660 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -8,27 +8,6 @@ #include "iavf_prototype.h" /** - * iavf_adminq_init_regs - Initialize AdminQ registers - * @hw: pointer to the hardware structure - * - * This assumes the alloc_asq and alloc_arq functions have already been called - **/ -static void iavf_adminq_init_regs(struct iavf_hw *hw) -{ - /* set head and tail registers in our local struct */ - hw->aq.asq.tail = IAVF_VF_ATQT1; - hw->aq.asq.head = IAVF_VF_ATQH1; - hw->aq.asq.len = IAVF_VF_ATQLEN1; - hw->aq.asq.bal = IAVF_VF_ATQBAL1; - hw->aq.asq.bah = IAVF_VF_ATQBAH1; - hw->aq.arq.tail = IAVF_VF_ARQT1; - hw->aq.arq.head = IAVF_VF_ARQH1; - hw->aq.arq.len = IAVF_VF_ARQLEN1; - hw->aq.arq.bal = IAVF_VF_ARQBAL1; - hw->aq.arq.bah = IAVF_VF_ARQBAH1; -} - -/** * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ @@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, IAVF_VF_ATQH1, 0); + wr32(hw, IAVF_VF_ATQT1, 0); /* set starting point */ - wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries | IAVF_VF_ATQLEN1_ATQENABLE_MASK)); - wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); - wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa)); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.asq.bal); + reg = rd32(hw, IAVF_VF_ATQBAL1); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; @@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw) u32 reg = 0; /* Clear Head and Tail */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, IAVF_VF_ARQH1, 0); + wr32(hw, IAVF_VF_ARQT1, 0); /* set starting point */ - wr32(hw, hw->aq.arq.len, 
(hw->aq.num_arq_entries | + wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries | IAVF_VF_ARQLEN1_ARQENABLE_MASK)); - wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); - wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa)); /* Update tail in the HW to post pre-allocated buffers */ - wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1); /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.arq.bal); + reg = rd32(hw, IAVF_VF_ARQBAL1); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; @@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - wr32(hw, hw->aq.asq.len, 0); - wr32(hw, hw->aq.asq.bal, 0); - wr32(hw, hw->aq.asq.bah, 0); + wr32(hw, IAVF_VF_ATQH1, 0); + wr32(hw, IAVF_VF_ATQT1, 0); + wr32(hw, IAVF_VF_ATQLEN1, 0); + wr32(hw, IAVF_VF_ATQBAL1, 0); + wr32(hw, IAVF_VF_ATQBAH1, 0); hw->aq.asq.count = 0; /* to indicate uninitialized queue */ @@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) } /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - wr32(hw, hw->aq.arq.len, 0); - wr32(hw, hw->aq.arq.bal, 0); - wr32(hw, hw->aq.arq.bah, 0); + wr32(hw, IAVF_VF_ARQH1, 0); + wr32(hw, IAVF_VF_ARQT1, 0); + wr32(hw, IAVF_VF_ARQLEN1, 0); + wr32(hw, IAVF_VF_ARQBAL1, 0); + wr32(hw, IAVF_VF_ARQBAH1, 0); hw->aq.arq.count = 0; /* to indicate uninitialized queue */ @@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) goto init_adminq_exit; } - /* Set up register offsets */ - iavf_adminq_init_regs(hw); - /* setup ASQ command write back timeout */ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT; @@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw) desc = IAVF_ADMINQ_DESC(*asq, ntc); details = IAVF_ADMINQ_DETAILS(*asq, ntc); - while (rd32(hw, hw->aq.asq.head) != ntc) { + while (rd32(hw, IAVF_VF_ATQH1) != ntc) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, - "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1)); if (details->callback) { IAVF_ADMINQ_CALLBACK cb_func = @@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw) /* AQ designers suggest use of head for better * timing reliability than DD bit */ - return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use; } /** @@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, hw->aq.asq_last_status = IAVF_AQ_RC_OK; - val = rd32(hw, hw->aq.asq.head); + val = rd32(hw, IAVF_VF_ATQH1); if (val >= hw->aq.num_asq_entries) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); @@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; if (!details->postpone) - wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use); /* if cmd_details are not defined or async flag is not set, * we need to wait for desc write back @@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && 
(!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { + if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR; @@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, } /* set next_to_use to head */ - ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; + ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK; @@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); /* set tail = the last cleaned desc index. */ - wr32(hw, hw->aq.arq.tail, ntc); + wr32(hw, IAVF_VF_ARQT1, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h index 1f60518eb0e5..406506f64bdd 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h @@ -29,13 +29,6 @@ struct iavf_adminq_ring { /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; - - /* used for queue tracking */ - u32 head; - u32 tail; - u32 len; - u32 bah; - u32 bal; }; /* ASQ transaction details */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c index 6edbf134b73f..a9e1da35e248 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c @@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) * @rss_cfg: the virtchnl message to be filled with RSS configuration setting * @packet_hdrs: the RSS configuration protocol header types * @hash_flds: the RSS configuration protocol hash fields + * @symm: if true, symmetric hash is required * * Returns 0 if the RSS configuration virtchnl message is filled successfully */ int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, - u32 packet_hdrs, u64 hash_flds) + u32 packet_hdrs, u64 hash_flds, bool symm) { struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; struct virtchnl_proto_hdr *hdr; - rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + if (symm) + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + else + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; proto_hdrs->tunnel_level = 0; /* always outer layer */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h index 4d3be11af7aa..e31eb2afebea 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h @@ -80,13 +80,14 @@ struct iavf_adv_rss { u32 packet_hdrs; u64 hash_flds; + bool symm; struct virtchnl_rss_cfg cfg_msg; }; int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, - u32 packet_hdrs, u64 hash_flds); + u32 packet_hdrs, u64 hash_flds, bool symm); struct iavf_adv_rss * iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs); void diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 8091e6feca01..89d2bce529ae 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -279,11 +279,11 @@ void 
iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, **/ bool iavf_check_asq_alive(struct iavf_hw *hw) { - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - IAVF_VF_ATQLEN1_ATQENABLE_MASK); - else + /* Check if the queue is initialized */ + if (!hw->aq.asq.count) return false; + + return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK); } /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index dc499fe7734e..cdb4849f5db4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -396,8 +396,7 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) unsigned int i; for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&data, "%s", - iavf_gstrings_priv_flags[i].flag_string); + ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string); } /** @@ -1436,16 +1435,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; - if (adapter->link_up) { + + if (adapter->link_up) fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; - } else { + else fltr->state = IAVF_FDIR_FLTR_INACTIVE; - } spin_unlock_bh(&adapter->fdir_fltr_lock); if (adapter->link_up) - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); ret: if (err && fltr) kfree(fltr); @@ -1475,7 +1473,6 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fltr) { if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { list_del(&fltr->list); kfree(fltr); @@ -1490,7 +1487,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_unlock_bh(&adapter->fdir_fltr_lock); if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); return err; } @@ -1541,11 +1538,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) /** * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input * @cmd: ethtool rxnfc command + * @symm: true if Symmetric Toeplitz is set * * This function parses the rxnfc command and returns intended hash fields for * RSS configuration */ -static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) +static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm) { u64 hfld = IAVF_ADV_RSS_HASH_INVALID; @@ -1617,17 +1615,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, struct iavf_adv_rss *rss_old, *rss_new; bool rss_new_add = false; int count = 50, err = 0; + bool symm = false; u64 hash_flds; u32 hdrs; if (!ADV_RSS_SUPPORT(adapter)) return -EOPNOTSUPP; + symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC); + hdrs = iavf_adv_rss_parse_hdrs(cmd); if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) return -EINVAL; - hash_flds = iavf_adv_rss_parse_hash_flds(cmd); + hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm); if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) return -EINVAL; @@ -1635,7 +1636,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (!rss_new) return -ENOMEM; - if
(iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { + if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds, + symm)) { kfree(rss_new); return -EINVAL; } @@ -1654,12 +1656,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (rss_old) { if (rss_old->state != IAVF_ADV_RSS_ACTIVE) { err = -EBUSY; - } else if (rss_old->hash_flds != hash_flds) { + } else if (rss_old->hash_flds != hash_flds || + rss_old->symm != symm) { rss_old->state = IAVF_ADV_RSS_ADD_REQUEST; rss_old->hash_flds = hash_flds; + rss_old->symm = symm; memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, sizeof(rss_new->cfg_msg)); - adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } else { err = -EEXIST; } @@ -1668,13 +1671,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, rss_new->state = IAVF_ADV_RSS_ADD_REQUEST; rss_new->packet_hdrs = hdrs; rss_new->hash_flds = hash_flds; + rss_new->symm = symm; list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); - adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } spin_unlock_bh(&adapter->adv_rss_lock); if (!err) - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); mutex_unlock(&adapter->crit_lock); @@ -1908,27 +1911,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) /** * iavf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int iavf_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (key) - memcpy(key, adapter->rss_key, adapter->rss_key_size); + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; + + if (rxfh->key) + memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size); - if (indir) + if (rxfh->indir) /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + rxfh->indir[i] = (u32)adapter->rss_lut[i]; return 0; } @@ -1936,33 +1939,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, /** * iavf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
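For reference, these iavf callbacks are being converted to the consolidated ethtool RSS interface, in which the former indir/key/hfunc arguments travel together in struct ethtool_rxfh_param and symmetric hashing is requested through its input_xfrm field. A minimal sketch of a get callback under that interface (demo_priv and its members are illustrative stand-ins, not fields from this patch):

static int demo_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct demo_priv *priv = netdev_priv(netdev); /* hypothetical priv */
	u16 i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (priv->symm_hash)
		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;

	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, priv->rss_key_size);

	if (rxfh->indir)
		for (i = 0; i < priv->rss_lut_size; i++)
			rxfh->indir[i] = priv->rss_lut[i];

	return 0;
}

A driver advertises the symmetric capability by setting cap_rss_sym_xor_supported in its ethtool_ops, as the iavf hunk below does.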
**/ -static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int iavf_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; /* Only support toeplitz hash function */ - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!key && !indir) + if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && + adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) { + if (!ADV_RSS_SUPPORT(adapter)) + return -EOPNOTSUPP; + adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; + } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && + adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) { + adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; + } + + if (!rxfh->key && !rxfh->indir) return 0; - if (key) - memcpy(adapter->rss_key, key, adapter->rss_key_size); + if (rxfh->key) + memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size); - if (indir) { + if (rxfh->indir) { /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + adapter->rss_lut[i] = (u8)(rxfh->indir[i]); } return iavf_config_rss(adapter); @@ -1971,6 +1987,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops iavf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, + .cap_rss_sym_xor_supported = true, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e8d5b889addc..335fd13e86f7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1038,13 +1038,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter, */ new_f->is_primary = true; new_f->add = true; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; ether_addr_copy(hw->mac.addr, new_mac); spin_unlock_bh(&adapter->mac_vlan_list_lock); /* schedule the watchdog task to immediately process the request */ - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER); return 0; } @@ -1263,8 +1262,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) iavf_napi_enable_all(adapter); - adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES); } /** @@ -1420,8 +1418,7 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; } - adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES); } /** @@ -2150,6 +2147,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_rss_lut(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) { + iavf_set_rss_hfunc(adapter); + return 0; + } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) { iavf_set_promiscuous(adapter); @@ -2318,10 +2319,8 @@ 
iavf_set_vlan_offload_features(struct iavf_adapter *adapter, } } - if (aq_required) { - adapter->aq_required |= aq_required; - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); - } + if (aq_required) + iavf_schedule_aq_request(adapter, aq_required); } /** @@ -3234,7 +3233,7 @@ static void iavf_adminq_task(struct work_struct *work) goto freedom; /* check for error indications */ - val = rd32(hw, hw->aq.arq.len); + val = rd32(hw, IAVF_VF_ARQLEN1); if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; @@ -3251,9 +3250,9 @@ static void iavf_adminq_task(struct work_struct *work) val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) - wr32(hw, hw->aq.arq.len, val); + wr32(hw, IAVF_VF_ARQLEN1, val); - val = rd32(hw, hw->aq.asq.len); + val = rd32(hw, IAVF_VF_ATQLEN1); oldval = val; if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); @@ -3268,7 +3267,7 @@ static void iavf_adminq_task(struct work_struct *work) val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) - wr32(hw, hw->aq.asq.len, val); + wr32(hw, IAVF_VF_ATQLEN1, val); freedom: kfree(event.msg_buf); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 2d9366be0ec5..22f2df7c460b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1142,6 +1142,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter) } /** + * iavf_set_rss_hfunc + * @adapter: adapter structure + * + * Request the PF to set our RSS Hash function + **/ +void iavf_set_rss_hfunc(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_hfunc *vrh; + int len = sizeof(*vrh); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n", + adapter->current_op); + return; + } + vrh = kzalloc(len, GFP_KERNEL); + if (!vrh) + return; + vrh->vsi_id = adapter->vsi.id; + vrh->rss_algorithm = adapter->hfunc; + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC; + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len); + kfree(vrh); +} + +/** * iavf_enable_vlan_stripping * @adapter: adapter structure * @@ -2190,6 +2218,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + + if (adapter->hfunc == + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + adapter->hfunc = + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + else + adapter->hfunc = + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 0679907980f7..cddd82d4ca0f 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -34,7 +34,9 @@ ice-y := ice_main.o \ ice_lag.o \ ice_ethtool.o \ ice_repr.o \ - ice_tc_lib.o + ice_tc_lib.o \ + ice_fwlog.o \ + ice_debugfs.o ice-$(CONFIG_PCI_IOV) += \ ice_sriov.o \ ice_virtchnl.o \ @@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o 
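The iavf hunks above repeatedly collapse the two-step "set an IAVF_FLAG_AQ_* bit, then kick the watchdog" sequence into a single iavf_schedule_aq_request() call. The helper itself is not shown in this patch; inferred from the converted call sites, it is presumably equivalent to:

/* Sketch inferred from the call sites: record the pending admin-queue
 * operation and schedule the watchdog so it is processed without delay.
 */
static void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}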
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o ice-$(CONFIG_GNSS) += ice_gnss.o +ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 351e0d36df44..2defac6d9168 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -360,6 +360,7 @@ struct ice_vsi { /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ + u8 rss_hfunc; /* User configured hash type */ u8 *rss_hkey_user; /* User configured hash keys */ u8 *rss_lut_user; /* User configured lookup table entries */ u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ @@ -522,11 +523,18 @@ enum ice_misc_thread_tasks { ICE_MISC_THREAD_NBITS /* must be last */ }; -struct ice_switchdev_info { +struct ice_eswitch { struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; + struct xarray reprs; bool is_running; + /* struct to allow cp queues management optimization */ + struct { + int to_reach; + int value; + bool is_reaching; + } qs; }; struct ice_agg_node { @@ -563,6 +571,10 @@ struct ice_pf { struct ice_vsi_stats **vsi_stats; struct ice_sw *first_sw; /* first switch created by firmware */ u16 eswitch_mode; /* current mode of eswitch */ + struct dentry *ice_debugfs_pf; + struct dentry *ice_debugfs_pf_fwlog; + /* keep track of all the dentrys for FW log modules */ + struct dentry **ice_debugfs_pf_fwlog_modules; struct ice_vfs vfs; DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); @@ -637,7 +649,7 @@ struct ice_pf { struct ice_link_default_override_tlv link_dflt_override; struct ice_lag *lag; /* Link Aggregation information */ - struct ice_switchdev_info switchdev; + struct ice_eswitch eswitch; struct ice_esw_br_port *br_port; #define ICE_INVALID_AGG_NODE_ID 0 @@ -648,6 +660,7 @@ struct ice_pf { #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; + struct device *hwmon_dev; }; extern struct workqueue_struct *ice_lag_wq; @@ -846,7 +859,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) */ static inline bool ice_is_switchdev_running(struct ice_pf *pf) { - return pf->switchdev.is_running; + return pf->eswitch.is_running; } #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 @@ -881,6 +894,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf) return false; } +void ice_debugfs_fwlog_init(struct ice_pf *pf); +void ice_debugfs_init(void); +void ice_debugfs_exit(void); +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module); + bool netif_is_ice(const struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); @@ -912,6 +930,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); @@ -989,4 +1008,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } + +extern const struct 
xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index d7fdb7ba7268..12c510bb1d9b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_RDMA 0x0051 +#define ICE_AQC_CAPS_SENSOR_READING 0x0067 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 @@ -491,10 +492,10 @@ struct ice_aqc_vsi_props { #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) #define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 #define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U u8 q_opt_tc; #define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 #define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) @@ -1413,6 +1414,30 @@ struct ice_aqc_get_phy_rec_clk_out { __le16 node_handle; }; +/* Get sensor reading (direct 0x0632) */ +struct ice_aqc_get_sensor_reading { + u8 sensor; + u8 format; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get sensor reading response (direct 0x0632) */ +struct ice_aqc_get_sensor_reading_resp { + union { + u8 raw[8]; + /* Output data for sensor 0x00, format 0x00 */ + struct _packed { + s8 temp; + u8 temp_warning_threshold; + u8 temp_critical_threshold; + u8 temp_fatal_threshold; + u8 reserved[4]; + } s0f0; + } data; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -2069,78 +2094,6 @@ struct ice_aqc_add_rdma_qset_data { struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; }; -/* Configure Firmware Logging Command (indirect 0xFF09) - * Logging Information Read Response (indirect 0xFF10) - * Note: The 0xFF10 command has no input parameters. 
- */ -struct ice_aqc_fw_logging { - u8 log_ctrl; -#define ICE_AQC_FW_LOG_AQ_EN BIT(0) -#define ICE_AQC_FW_LOG_UART_EN BIT(1) - u8 rsvd0; - u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ -#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) -#define ICE_AQC_FW_LOG_UART_VALID BIT(1) - u8 rsvd1[5]; - __le32 addr_high; - __le32 addr_low; -}; - -enum ice_aqc_fw_logging_mod { - ICE_AQC_FW_LOG_ID_GENERAL = 0, - ICE_AQC_FW_LOG_ID_CTRL, - ICE_AQC_FW_LOG_ID_LINK, - ICE_AQC_FW_LOG_ID_LINK_TOPO, - ICE_AQC_FW_LOG_ID_DNL, - ICE_AQC_FW_LOG_ID_I2C, - ICE_AQC_FW_LOG_ID_SDP, - ICE_AQC_FW_LOG_ID_MDIO, - ICE_AQC_FW_LOG_ID_ADMINQ, - ICE_AQC_FW_LOG_ID_HDMA, - ICE_AQC_FW_LOG_ID_LLDP, - ICE_AQC_FW_LOG_ID_DCBX, - ICE_AQC_FW_LOG_ID_DCB, - ICE_AQC_FW_LOG_ID_NETPROXY, - ICE_AQC_FW_LOG_ID_NVM, - ICE_AQC_FW_LOG_ID_AUTH, - ICE_AQC_FW_LOG_ID_VPD, - ICE_AQC_FW_LOG_ID_IOSF, - ICE_AQC_FW_LOG_ID_PARSER, - ICE_AQC_FW_LOG_ID_SW, - ICE_AQC_FW_LOG_ID_SCHEDULER, - ICE_AQC_FW_LOG_ID_TXQ, - ICE_AQC_FW_LOG_ID_RSVD, - ICE_AQC_FW_LOG_ID_POST, - ICE_AQC_FW_LOG_ID_WATCHDOG, - ICE_AQC_FW_LOG_ID_TASK_DISPATCH, - ICE_AQC_FW_LOG_ID_MNG, - ICE_AQC_FW_LOG_ID_MAX, -}; - -/* Defines for both above FW logging command/response buffers */ -#define ICE_AQC_FW_LOG_ID_S 0 -#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) - -#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ -#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ - -#define ICE_AQC_FW_LOG_EN_S 12 -#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) -#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ -#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ -#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ -#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ - -/* Get/Clear FW Log (indirect 0xFF11) */ -struct ice_aqc_get_clear_fw_log { - u8 flags; -#define ICE_AQC_FW_LOG_CLEAR BIT(0) -#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) - u8 rsvd1[7]; - __le32 addr_high; - __le32 addr_low; -}; - /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C41 and 0x0C42) */ struct ice_aqc_download_pkg { @@ -2403,6 +2356,84 @@ struct ice_aqc_event_lan_overflow { u8 reserved[8]; }; +enum ice_aqc_fw_logging_mod { + ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_XLR, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_SYNCE, + ICE_AQC_FW_LOG_ID_HEALTH, + ICE_AQC_FW_LOG_ID_TSDRV, + ICE_AQC_FW_LOG_ID_PFREG, + ICE_AQC_FW_LOG_ID_MDLVER, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* Set FW Logging configuration (indirect 0xFF30) + * Register for FW Logging (indirect 0xFF31) + * Query FW Logging (indirect 0xFF32) + * FW Log Event (indirect 0xFF33) + */ +struct ice_aqc_fw_log { + u8 cmd_flags; +#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) +#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) +#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2) +#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) +#define 
ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) +#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) + + u8 rsp_flag; + __le16 fw_rt_msb; + union { + struct { + __le32 fw_rt_lsb; + } sync; + struct { + __le16 log_resolution; +#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) +#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) + + __le16 mdl_cnt; + } cfg; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response Buffer for: + * Set Firmware Logging Configuration (0xFF30) + * Query FW Logging (0xFF32) + */ +struct ice_aqc_fw_log_cfg_resp { + __le16 module_identifier; + u8 log_level; + u8 rsvd0; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -2443,6 +2474,8 @@ struct ice_aq_desc { struct ice_aqc_restart_an restart_an; struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; + struct ice_aqc_get_sensor_reading get_sensor_reading; + struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; @@ -2480,8 +2513,6 @@ struct ice_aq_desc { struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; - struct ice_aqc_fw_logging fw_logging; - struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_cgu_input_config set_cgu_input_config; struct ice_aqc_get_cgu_input_config get_cgu_input_config; @@ -2493,6 +2524,7 @@ struct ice_aq_desc { struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; struct ice_aqc_get_cgu_info get_cgu_info; struct ice_aqc_driver_shared_params drv_shared_params; + struct ice_aqc_fw_log fw_log; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_mac_cfg set_mac_cfg; @@ -2618,6 +2650,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, + ice_aqc_opc_get_sensor_reading = 0x0632, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2690,9 +2723,11 @@ enum ice_adminq_opc { /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, - /* debug commands */ - ice_aqc_opc_fw_logging = 0xFF09, - ice_aqc_opc_fw_logging_info = 0xFF10, + /* FW Logging Commands */ + ice_aqc_opc_fw_logs_config = 0xFF30, + ice_aqc_opc_fw_logs_register = 0xFF31, + ice_aqc_opc_fw_logs_query = 0xFF32, + ice_aqc_opc_fw_logs_event = 0xFF33, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 7fa43827a3f0..b25b7f415965 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -189,10 +189,18 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_for_each_tx_ring(tx_ring, q_vector->tx) { + if (vsi->netdev) + netif_queue_set_napi(vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, NULL); tx_ring->q_vector = NULL; - ice_for_each_rx_ring(rx_ring, q_vector->rx) + } + ice_for_each_rx_ring(rx_ring, q_vector->rx) { + if (vsi->netdev) + netif_queue_set_napi(vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, NULL); rx_ring->q_vector = NULL; + } /* only VSI with an associated netdev is set up with NAPI */ if (vsi->netdev) @@ -519,6 +527,19 @@ 
static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } +static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +{ + void *ctx_ptr = &ring->pkt_ctx; + struct xsk_cb_desc desc = {}; + + XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); + desc.src = &ctx_ptr; + desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - + sizeof(struct xdp_buff); + desc.bytes = sizeof(ctx_ptr); + xsk_pool_fill_cb(ring->xsk_pool, &desc); +} + /** * ice_vsi_cfg_rxq - Configure an Rx queue * @ring: the ring being configured @@ -553,6 +574,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); @@ -575,6 +597,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); ring->xdp.data = NULL; + ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9a6c25f98632..08a1f699a34f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -934,216 +934,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) } /** - * ice_get_fw_log_cfg - get FW logging configuration - * @hw: pointer to the HW struct - */ -static int ice_get_fw_log_cfg(struct ice_hw *hw) -{ - struct ice_aq_desc desc; - __le16 *config; - int status; - u16 size; - - size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; - config = kzalloc(size, GFP_KERNEL); - if (!config) - return -ENOMEM; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); - if (!status) { - u16 i; - - /* Save FW logging information into the HW structure */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 v, m, flgs; - - v = le16_to_cpu(config[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; - - if (m < ICE_AQC_FW_LOG_ID_MAX) - hw->fw_log.evnts[m].cur = flgs; - } - } - - kfree(config); - - return status; -} - -/** - * ice_cfg_fw_log - configure FW logging - * @hw: pointer to the HW struct - * @enable: enable certain FW logging events if true, disable all if false - * - * This function enables/disables the FW logging via Rx CQ events and a UART - * port based on predetermined configurations. FW logging via the Rx CQ can be - * enabled/disabled for individual PF's. However, FW logging via the UART can - * only be enabled/disabled for all PFs on the same device. - * - * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in - * hw->fw_log need to be set accordingly, e.g. based on user-provided input, - * before initializing the device. - * - * When re/configuring FW logging, callers need to update the "cfg" elements of - * the hw->fw_log.evnts array with the desired logging event configurations for - * modules of interest. When disabling FW logging completely, the callers can - * just pass false in the "enable" parameter. On completion, the function will - * update the "cur" element of the hw->fw_log.evnts array with the resulting - * logging event configurations of the modules that are being re/configured. FW - * logging modules that are not part of a reconfiguration operation retain their - * previous states. 
- * - * Before resetting the device, it is recommended that the driver disables FW - * logging before shutting down the control queue. When disabling FW logging - * ("enable" = false), the latest configurations of FW logging events stored in - * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after - * a device reset. - * - * When enabling FW logging to emit log messages via the Rx CQ during the - * device's initialization phase, a mechanism alternative to interrupt handlers - * needs to be used to extract FW log messages from the Rx CQ periodically and - * to prevent the Rx CQ from being full and stalling other types of control - * messages from FW to SW. Interrupts are typically disabled during the device's - * initialization phase. - */ -static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) -{ - struct ice_aqc_fw_logging *cmd; - u16 i, chgs = 0, len = 0; - struct ice_aq_desc desc; - __le16 *data = NULL; - u8 actv_evnts = 0; - void *buf = NULL; - int status = 0; - - if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) - return 0; - - /* Disable FW logging only when the control queue is still responsive */ - if (!enable && - (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) - return 0; - - /* Get current FW log settings */ - status = ice_get_fw_log_cfg(hw); - if (status) - return status; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); - cmd = &desc.params.fw_logging; - - /* Indicate which controls are valid */ - if (hw->fw_log.cq_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; - - if (hw->fw_log.uart_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; - - if (enable) { - /* Fill in an array of entries with FW logging modules and - * logging events being reconfigured. - */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 val; - - /* Keep track of enabled event types */ - actv_evnts |= hw->fw_log.evnts[i].cfg; - - if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) - continue; - - if (!data) { - data = devm_kcalloc(ice_hw_to_dev(hw), - ICE_AQC_FW_LOG_ID_MAX, - sizeof(*data), - GFP_KERNEL); - if (!data) - return -ENOMEM; - } - - val = i << ICE_AQC_FW_LOG_ID_S; - val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; - data[chgs++] = cpu_to_le16(val); - } - - /* Only enable FW logging if at least one module is specified. - * If FW logging is currently enabled but all modules are not - * enabled to emit log messages, disable FW logging altogether. - */ - if (actv_evnts) { - /* Leave if there is effectively no change */ - if (!chgs) - goto out; - - if (hw->fw_log.cq_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; - - if (hw->fw_log.uart_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; - - buf = data; - len = sizeof(*data) * chgs; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - } - } - - status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); - if (!status) { - /* Update the current configuration to reflect events enabled. - * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW - * logging mode is enabled for the device. They do not reflect - * actual modules being enabled to emit log messages. So, their - * values remain unchanged even when all modules are disabled. - */ - u16 cnt = enable ? 
chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; - - hw->fw_log.actv_evnts = actv_evnts; - for (i = 0; i < cnt; i++) { - u16 v, m; - - if (!enable) { - /* When disabling all FW logging events as part - * of device's de-initialization, the original - * configurations are retained, and can be used - * to reconfigure FW logging later if the device - * is re-initialized. - */ - hw->fw_log.evnts[i].cur = 0; - continue; - } - - v = le16_to_cpu(data[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; - } - } - -out: - devm_kfree(ice_hw_to_dev(hw), data); - - return status; -} - -/** - * ice_output_fw_log - * @hw: pointer to the HW struct - * @desc: pointer to the AQ message descriptor - * @buf: pointer to the buffer accompanying the AQ message - * - * Formats a FW Log message and outputs it via the standard driver logs. - */ -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) -{ - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); - ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, - le16_to_cpu(desc->datalen)); - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); -} - -/** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct * @@ -1200,10 +990,10 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; - /* Enable FW logging. Not fatal if this fails. */ - status = ice_cfg_fw_log(hw, true); + status = ice_fwlog_init(hw); if (status) - ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n", + status); status = ice_clear_pf_cfg(hw); if (status) @@ -1354,8 +1144,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_free_hw_tbls(hw); mutex_destroy(&hw->tnl_lock); - /* Attempt to disable FW logging before shutting down control queues */ - ice_cfg_fw_log(hw, false); + ice_fwlog_deinit(hw); ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ @@ -2711,6 +2500,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** + * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading + * enabled sensors. 
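The sensor-reading capability parsed here, together with the ice_aq_get_sensor_reading() wrapper added below, is what the new CONFIG_ICE_HWMON code builds on. A hedged sketch of how a hwmon read callback might consume the s0f0 response format (the callback shape follows the standard hwmon_ops contract; the millidegree scaling and the demo_ naming are assumptions, not code from this patch):

static int demo_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, long *val)
{
	struct ice_pf *pf = dev_get_drvdata(dev);
	struct ice_aqc_get_sensor_reading_resp resp;
	int err;

	if (type != hwmon_temp)
		return -EOPNOTSUPP;

	err = ice_aq_get_sensor_reading(&pf->hw, &resp);
	if (err)
		return err;

	switch (attr) {
	case hwmon_temp_input:
		/* firmware reports whole degrees Celsius; hwmon expects
		 * millidegrees
		 */
		*val = resp.data.s0f0.temp * 1000;
		return 0;
	case hwmon_temp_crit:
		*val = resp.data.s0f0.temp_critical_threshold * 1000;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}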
+ */ +static void +ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->supported_sensors = le32_to_cpu(cap->number); + + ice_debug(hw, ICE_DBG_INIT, + "dev caps: supported sensors (bitmap) = 0x%x\n", + dev_p->supported_sensors); +} + +/** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_1588: ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); break; - case ICE_AQC_CAPS_FD: + case ICE_AQC_CAPS_FD: ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_SENSOR_READING: + ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -5541,6 +5353,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, } /** + * ice_aq_get_sensor_reading + * @hw: pointer to the HW struct + * @data: pointer to data to be read from the sensor + * + * Get sensor reading (0x0632) + */ +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data) +{ + struct ice_aqc_get_sensor_reading *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); + cmd = &desc.params.get_sensor_reading; +#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 +#define ICE_INTERNAL_TEMP_SENSOR 0 + cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; + cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + memcpy(data, &desc.params.get_sensor_reading_resp, + sizeof(*data)); + + return status; +} + +/** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 31fdcac33986..3e933f75e948 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include <linux/bitfield.h> +#include "ice.h" #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" @@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, struct ice_sq_cd *cd); int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); @@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, int ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, u8 *flags, u16 *node_handle); +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c new file mode 100644 index 000000000000..c2bfba6b9ead --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. 
*/ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/random.h> +#include <linux/vmalloc.h> +#include "ice.h" + +static struct dentry *ice_debugfs_root; + +/* create a define that has an extra module that doesn't really exist. this + * is so we can add a module 'all' to easily enable/disable all the modules + */ +#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1) + +/* the ordering in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod + */ +static const char * const ice_fwlog_module_string[] = { + "general", + "ctrl", + "link", + "link_topo", + "dnl", + "i2c", + "sdp", + "mdio", + "adminq", + "hdma", + "lldp", + "dcbx", + "dcb", + "xlr", + "nvm", + "auth", + "vpd", + "iosf", + "parser", + "sw", + "scheduler", + "txq", + "rsvd", + "post", + "watchdog", + "task_dispatch", + "mng", + "synce", + "health", + "tsdrv", + "pfreg", + "mdlver", + "all", +}; + +/* the ordering in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_level_string[] = { + "none", + "error", + "warning", + "normal", + "verbose", +}; + +/* the order in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_log_size[] = { + "128K", + "256K", + "512K", + "1M", + "2M", +}; + +/** + * ice_fwlog_print_module_cfg - print current FW logging module configuration + * @hw: pointer to the HW structure + * @module: module to print + * @s: the seq file to put data into + */ +static void +ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s) +{ + struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg; + struct ice_fwlog_module_entry *entry; + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + entry = &cfg->module_entries[module]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } else { + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + entry = &cfg->module_entries[i]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } + } +} + +static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d) +{ + int i, module; + + module = -1; + /* find the module based on the dentry */ + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + if (d == pf->ice_debugfs_pf_fwlog_modules[i]) { + module = i; + break; + } + } + + return module; +} + +/** + * ice_debugfs_module_show - read from 'module' file + * @s: the opened file + * @v: pointer to the offset + */ +static int ice_debugfs_module_show(struct seq_file *s, void *v) +{ + const struct file *filp = s->file; + struct dentry *dentry; + struct ice_pf *pf; + int module; + + dentry = file_dentry(filp); + pf = s->private; + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(ice_pf_to_dev(pf), "unknown module\n"); + return -EINVAL; + } + + ice_fwlog_print_module_cfg(&pf->hw, module, s); + + return 0; +} + +static int ice_debugfs_module_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, ice_debugfs_module_show, inode->i_private); +} + +/** + * ice_debugfs_module_write - write into 'module' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * 
@ppos: file position offset + */ +static ssize_t +ice_debugfs_module_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = file_inode(filp)->i_private; + struct dentry *dentry = file_dentry(filp); + struct device *dev = ice_pf_to_dev(pf); + char user_val[16], *cmd_buf; + int module, log_level, cnt; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 8) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(dev, "unknown module\n"); + return -EINVAL; + } + + cnt = sscanf(cmd_buf, "%s", user_val); + if (cnt != 1) + return -EINVAL; + + log_level = sysfs_match_string(ice_fwlog_level_string, user_val); + if (log_level < 0) { + dev_info(dev, "unknown log level '%s'\n", user_val); + return -EINVAL; + } + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + ice_pf_fwlog_update_module(pf, log_level, module); + } else { + /* the module 'all' is a shortcut so that we can set + * all of the modules to the same level quickly + */ + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) + ice_pf_fwlog_update_module(pf, log_level, i); + } + + return count; +} + +static const struct file_operations ice_debugfs_module_fops = { + .owner = THIS_MODULE, + .open = ice_debugfs_module_open, + .read = seq_read, + .release = single_release, + .write = ice_debugfs_module_write, +}; + +/** + * ice_debugfs_nr_messages_read - read from 'nr_messages' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_nr_messages_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), "%d\n", + hw->fwlog_cfg.log_resolution); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_nr_messages_write - write into 'nr_messages' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + s16 nr_messages; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 4) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtos16(user_val, 0, &nr_messages); + if (ret) + return ret; + + if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION || + nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) { + dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", + nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION, + ICE_AQC_FW_LOG_MAX_RESOLUTION); + return -EINVAL; + } + + hw->fwlog_cfg.log_resolution = nr_messages; + + return count; +} + +static const struct file_operations ice_debugfs_nr_messages_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_nr_messages_read, + .write = ice_debugfs_nr_messages_write, +}; + +/** + * ice_debugfs_enable_read - 
read from 'enable' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_enable_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), "%u\n", + (u16)(hw->fwlog_cfg.options & + ICE_FWLOG_OPTION_IS_REGISTERED) >> 3); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_enable_write - write into 'enable' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_enable_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + bool enable; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 2) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtobool(user_val, &enable); + if (ret) + goto enable_write_error; + + if (enable) + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA; + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + + ret = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (ret) + goto enable_write_error; + + if (enable) + ret = ice_fwlog_register(hw); + else + ret = ice_fwlog_unregister(hw); + + if (ret) + goto enable_write_error; + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +enable_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. 
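The handler above (sscanf into a bounce buffer, then kstrtobool, then the WARN_ON check) enforces debugfs's "consume the whole write or fail" convention. Where no extra tokenizing is needed, the same contract can be met more compactly with kstrtobool_from_user(); a minimal sketch with illustrative demo_ names:

static ssize_t demo_enable_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	bool enable;
	int err;

	/* reject partial writes up front */
	if (*ppos != 0)
		return -EINVAL;

	/* copy from userspace and parse "0"/"1"/"y"/"n" in one step */
	err = kstrtobool_from_user(buf, count, &enable);
	if (err)
		return err;

	/* ... apply 'enable' to the device here ... */

	/* report the whole input as consumed */
	return count;
}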
+ */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_enable_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_enable_read, + .write = ice_debugfs_enable_write, +}; + +/** + * ice_debugfs_log_size_read - read from 'log_size' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_log_size_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + int index; + + index = hw->fwlog_ring.index; + snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_log_size_write - write into 'log_size' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_log_size_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + ssize_t ret; + int index; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 5) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + index = sysfs_match_string(ice_fwlog_log_size, user_val); + if (index < 0) { + dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n", + user_val); + ret = -EINVAL; + goto log_size_write_error; + } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) { + dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n"); + ret = -EINVAL; + goto log_size_write_error; + } + + /* free all the buffers and the tracking info and resize */ + ice_fwlog_realloc_rings(hw, index); + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +log_size_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. 
+ */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_log_size_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_log_size_read, + .write = ice_debugfs_log_size_write, +}; + +/** + * ice_debugfs_data_read - read from 'data' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + int data_copied = 0; + bool done = false; + + if (ice_fwlog_ring_empty(&hw->fwlog_ring)) + return 0; + + while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) { + struct ice_fwlog_data *log; + u16 cur_buf_len; + + log = &hw->fwlog_ring.rings[hw->fwlog_ring.head]; + cur_buf_len = log->data_size; + if (cur_buf_len >= count) { + done = true; + continue; + } + + if (copy_to_user(buffer, log->data, cur_buf_len)) { + /* if there is an error then bail and return whatever + * the driver has copied so far + */ + done = true; + continue; + } + + data_copied += cur_buf_len; + buffer += cur_buf_len; + count -= cur_buf_len; + *ppos += cur_buf_len; + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } + + return data_copied; +} + +/** + * ice_debugfs_data_write - write into 'data' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + ssize_t ret; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* any value is allowed to clear the buffer so no need to even look at + * what the value is + */ + if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) { + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; + } else { + dev_info(dev, "Can't clear FW log data while FW log running\n"); + ret = -EINVAL; + goto nr_buffs_write_error; + } + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +nr_buffs_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. 
+ */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_data_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_data_read, + .write = ice_debugfs_data_write, +}; + +/** + * ice_debugfs_fwlog_init - setup the debugfs directory + * @pf: the ice that is starting up + */ +void ice_debugfs_fwlog_init(struct ice_pf *pf) +{ + const char *name = pci_name(pf->pdev); + struct dentry *fw_modules_dir; + struct dentry **fw_modules; + int i; + + /* only support fw log commands on PF 0 */ + if (pf->hw.bus.func) + return; + + /* allocate space for this first because if it fails then we don't + * need to unwind + */ + fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules), + GFP_KERNEL); + if (!fw_modules) + return; + + pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root); + if (IS_ERR(pf->ice_debugfs_pf)) + goto err_create_module_files; + + pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog", + pf->ice_debugfs_pf); + if (IS_ERR(pf->ice_debugfs_pf)) + goto err_create_module_files; + + fw_modules_dir = debugfs_create_dir("modules", + pf->ice_debugfs_pf_fwlog); + if (IS_ERR(fw_modules_dir)) + goto err_create_module_files; + + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i], + 0600, fw_modules_dir, pf, + &ice_debugfs_module_fops); + if (IS_ERR(fw_modules[i])) + goto err_create_module_files; + } + + debugfs_create_file("nr_messages", 0600, + pf->ice_debugfs_pf_fwlog, pf, + &ice_debugfs_nr_messages_fops); + + pf->ice_debugfs_pf_fwlog_modules = fw_modules; + + debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_enable_fops); + + debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_log_size_fops); + + debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_data_fops); + + return; + +err_create_module_files: + debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog); + kfree(fw_modules); +} + +/** + * ice_debugfs_init - create root directory for debugfs entries + */ +void ice_debugfs_init(void) +{ + ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(ice_debugfs_root)) + pr_info("init of debugfs failed\n"); +} + +/** + * ice_debugfs_exit - remove debugfs entries + */ +void ice_debugfs_exit(void) +{ + debugfs_remove_recursive(ice_debugfs_root); + ice_debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 80dc5445b50d..65be56f2af9e 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); } +static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + u32 id, cfg_ver, fw_ver; + + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver); +} + +static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number); +} + #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } #define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL } #define 
stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback } @@ -235,6 +253,8 @@ static const struct ice_devlink_version { running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id), combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver), combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build), + fixed("cgu.id", ice_info_cgu_id), + running("fw.cgu", ice_info_cgu_fw_build), }; /** @@ -810,6 +830,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node struct ice_vf *vf; int i; + if (node->rate_node) + /* already added, skip to the next */ + goto traverse_children; + if (node->parent == tc_node) { /* create root node */ rate_node = devl_rate_node_create(devlink, node, node->name, NULL); @@ -831,6 +855,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node if (rate_node && !IS_ERR(rate_node)) node->rate_node = rate_node; +traverse_children: for (i = 0; i < node->num_children; i++) ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf); } @@ -861,6 +886,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v return 0; } +static void ice_clear_rate_nodes(struct ice_sched_node *node) +{ + node->rate_node = NULL; + + for (int i = 0; i < node->num_children; i++) + ice_clear_rate_nodes(node->children[i]); +} + +/** + * ice_devlink_rate_clear_tx_topology - clear node->rate_node + * @vsi: main vsi struct + * + * Clear rate_node to cleanup creation of Tx topology. + * + */ +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi) +{ + struct ice_port_info *pi = vsi->port_info; + + mutex_lock(&pi->sched_lock); + ice_clear_rate_nodes(pi->root->children[0]); + mutex_unlock(&pi->sched_lock); +} + /** * ice_set_object_tx_share - sets node scheduling parameter * @pi: devlink struct instance diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index 6ec96779f52e..d291c0e2e17b 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf); int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi); void ice_tear_down_devlink_rate_tree(struct ice_pf *pf); +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi); #endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 86b180cb32a0..b9c5eced6326 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -513,31 +513,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, } /** - * ice_dpll_mode_supported - check if dpll's working mode is supported - * @dpll: registered dpll pointer - * @dpll_priv: private data pointer passed on dpll registration - * @mode: mode to be checked for support - * @extack: error reporting - * - * Dpll subsystem callback. Provides information if working mode is supported - * by dpll. 
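With the .mode_supported callback being removed here (presumably because the dpll core can answer mode queries on its own now that only automatic mode is exposed), a device reports its mode solely through .mode_get. A hedged sketch of what that remaining callback amounts to for a device fixed in automatic mode, with the signature taken from the dpll_device_ops contract:

static int demo_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
			      enum dpll_mode *mode,
			      struct netlink_ext_ack *extack)
{
	/* this device only ever runs in automatic mode */
	*mode = DPLL_MODE_AUTOMATIC;
	return 0;
}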
- * - * Return: - * * true - mode is supported - * * false - mode is not supported - */ -static bool ice_dpll_mode_supported(const struct dpll_device *dpll, - void *dpll_priv, - enum dpll_mode mode, - struct netlink_ext_ack *extack) -{ - if (mode == DPLL_MODE_AUTOMATIC) - return true; - - return false; -} - -/** * ice_dpll_mode_get - get dpll's working mode * @dpll: registered dpll pointer * @dpll_priv: private data pointer passed on dpll registration @@ -1197,7 +1172,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = { static const struct dpll_device_ops ice_dpll_ops = { .lock_status_get = ice_dpll_lock_status_get, - .mode_supported = ice_dpll_mode_supported, .mode_get = ice_dpll_mode_get, }; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index a655d499abfa..ca118bc37e44 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -11,17 +11,34 @@ #include "ice_tc_lib.h" /** - * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index + * ice_eswitch_del_sp_rules - delete adv rules added on PRs + * @pf: pointer to the PF struct + * + * Delete all advanced rules that were used to forward packets with the + * device's VSI index to the corresponding eswitch ctrl VSI queue. + */ +static void ice_eswitch_del_sp_rules(struct ice_pf *pf) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + if (repr->sp_rule.rid) + ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); + } +} + +/** + * ice_eswitch_add_sp_rule - add adv rule with device's VSI index * @pf: pointer to PF struct - * @vf: pointer to VF struct + * @repr: pointer to the repr struct * * This function adds advanced rule that forwards packets with - * VF's VSI index to the corresponding switchdev ctrl VSI queue. + * device's VSI index to the corresponding eswitch ctrl VSI queue. */ -static int -ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) +static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_adv_rule_info rule_info = { 0 }; struct ice_adv_lkup_elem *list; struct ice_hw *hw = &pf->hw; @@ -38,39 +55,42 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) rule_info.sw_act.vsi_handle = ctrl_vsi->idx; rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[vf->vf_id]; + ctrl_vsi->rxq_map[repr->q_id]; rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; rule_info.flags_info.act_valid = true; rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = vf->lan_vsi_idx; + rule_info.src_vsi = repr->src_vsi->idx; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &vf->repr->sp_rule); + &repr->sp_rule); if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d", - vf->vf_id); + dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", + repr->id); kfree(list); return err; } -/** - * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index - * @vf: pointer to the VF struct - * - * Delete the advanced rule that was used to forward packets with the VF's VSI - * index to the corresponding switchdev ctrl VSI queue. 
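The eswitch rework in this file replaces per-VF bookkeeping with an xarray of representors (pf->eswitch.reprs), walked with xa_for_each() as in ice_eswitch_del_sp_rules() above. A generic sketch of that store-and-iterate pattern (demo_ names are illustrative; the xarray must be created with allocation support, e.g. DEFINE_XARRAY_ALLOC):

#include <linux/xarray.h>

struct demo_repr {
	u32 id;
};

static int demo_add_repr(struct xarray *reprs, struct demo_repr *repr)
{
	/* xa_alloc() finds a free index, stores the entry, and returns
	 * the chosen index through repr->id
	 */
	return xa_alloc(reprs, &repr->id, repr, xa_limit_32b, GFP_KERNEL);
}

static void demo_release_reprs(struct xarray *reprs)
{
	struct demo_repr *repr;
	unsigned long id;

	xa_for_each(reprs, id, repr)
		xa_erase(reprs, id);
}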
- */ -static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) +static int +ice_eswitch_add_sp_rules(struct ice_pf *pf) { - if (!vf->repr) - return; + struct ice_repr *repr; + unsigned long id; + int err; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + err = ice_eswitch_add_sp_rule(pf, repr); + if (err) { + ice_eswitch_del_sp_rules(pf); + return err; + } + } - ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule); + return 0; } /** - * ice_eswitch_setup_env - configure switchdev HW filters + * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * * This function adds HW filters configuration specific for switchdev @@ -78,18 +98,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) */ static int ice_eswitch_setup_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct net_device *uplink_netdev = uplink_vsi->netdev; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct net_device *netdev = uplink_vsi->netdev; struct ice_vsi_vlan_ops *vlan_ops; bool rule_added = false; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); - netif_addr_lock_bh(uplink_netdev); - __dev_uc_unsync(uplink_netdev, NULL); - __dev_mc_unsync(uplink_netdev, NULL); - netif_addr_unlock_bh(uplink_netdev); + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); + netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_def_rx; @@ -132,19 +152,20 @@ err_def_rx: } /** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI - * @pf: pointer to PF struct + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI + * @eswitch: pointer to eswitch struct * - * In switchdev number of allocated Tx/Rx rings is equal. + * In eswitch the number of allocated Tx rings is equal to the number of Rx rings. * * This function fills the q_vector structures associated with each representor and * moves each ring pair to a port representor netdev. Each port representor * gets one dedicated Tx/Rx ring pair, so the number of ring pairs is equal to * the number of VFs.
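[Editor's note] The reworked eswitch code in this file keeps port representors in an ID-allocating xarray (pf->eswitch.reprs) instead of walking the VF table. A self-contained sketch of that pattern, with hypothetical example_* names that are not part of this patch:

	#include <linux/printk.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(example_reprs);

	static int example_repr_track(void *repr)
	{
		u32 id;

		/* pick a free ID in [1, INT_MAX] and store repr under it */
		return xa_alloc(&example_reprs, &id, repr,
				XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	}

	static void example_repr_walk(void)
	{
		unsigned long id;
		void *repr;

		/* visits every entry currently present in the xarray */
		xa_for_each(&example_reprs, id, repr)
			pr_info("repr %lu at %p\n", id, repr);
	}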
*/ -static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) { - struct ice_vsi *vsi = pf->switchdev.control_vsi; + struct ice_vsi *vsi = eswitch->control_vsi; + unsigned long repr_id = 0; int q_id; ice_for_each_txq(vsi, q_id) { @@ -152,13 +173,14 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) struct ice_q_vector *q_vector; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; struct ice_repr *repr; - struct ice_vf *vf; - vf = ice_get_vf_by_id(pf, q_id); - if (WARN_ON(!vf)) - continue; + repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, + XA_PRESENT); + if (!repr) + break; - repr = vf->repr; + repr_id += 1; + repr->q_id = q_id; q_vector = repr->q_vector; tx_ring = vsi->tx_rings[q_id]; rx_ring = vsi->rx_rings[q_id]; @@ -181,136 +203,96 @@ rx_ring->q_vector = q_vector; rx_ring->next = NULL; rx_ring->netdev = repr->netdev; - - ice_put_vf(vf); } } /** - * ice_eswitch_release_reprs - clear PR VSIs configuration + * ice_eswitch_release_repr - clear PR VSI configuration * @pf: pointer to PF struct - * @ctrl_vsi: pointer to switchdev control VSI + * @repr: pointer to PR */ static void -ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_vsi *vsi = repr->src_vsi; - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - /* Skip VFs that aren't configured */ - if (!vf->repr->dst) - continue; + /* Skip representors that aren't configured */ + if (!repr->dst) + return; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_eswitch_del_vf_sp_rule(vf); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(repr->dst); + repr->dst = NULL; + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); - netif_napi_del(&vf->repr->q_vector->napi); - } + netif_napi_del(&repr->q_vector->napi); } /** - * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * ice_eswitch_setup_repr - configure PR to run in switchdev mode * @pf: pointer to PF struct + * @repr: pointer to PR struct */ -static int ice_eswitch_setup_reprs(struct ice_pf *pf) +static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int max_vsi_num = 0; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - ice_remove_vsi_fltr(&pf->hw, vsi->idx); - vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, - GFP_KERNEL); - if (!vf->repr->dst) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - goto err; - } + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; - if (ice_eswitch_add_vf_sp_rule(pf, vf)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - goto err; - } + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!repr->dst) + goto err_add_mac_fltr; - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { -
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - goto err; - } + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) + goto err_dst_free; - if (ice_vsi_add_vlan_zero(vsi)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - goto err; - } - - if (max_vsi_num < vsi->vsi_num) - max_vsi_num = vsi->vsi_num; + if (ice_vsi_add_vlan_zero(vsi)) + goto err_update_security; - netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, - ice_napi_poll); + netif_napi_add(repr->netdev, &repr->q_vector->napi, + ice_napi_poll); - netif_keep_dst(vf->repr->netdev); - } + netif_keep_dst(repr->netdev); - ice_for_each_vf(pf, bkt, vf) { - struct ice_repr *repr = vf->repr; - struct ice_vsi *vsi = repr->src_vsi; - struct metadata_dst *dst; - - dst = repr->dst; - dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); - } + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); return 0; -err: - ice_eswitch_release_reprs(pf, ctrl_vsi); +err_update_security: + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); +err_dst_free: + metadata_dst_free(repr->dst); + repr->dst = NULL; +err_add_mac_fltr: + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); return -ENODEV; } /** - * ice_eswitch_update_repr - reconfigure VF port representor - * @vsi: VF VSI for which port representor is configured + * ice_eswitch_update_repr - reconfigure port representor + * @repr_id: representor ID + * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - struct ice_vf *vf; int ret; if (!ice_is_switchdev_running(pf)) return; - vf = vsi->vf; - repr = vf->repr; + repr = xa_load(&pf->eswitch.reprs, repr_id); + if (!repr) + return; + repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; @@ -319,9 +301,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", - vsi->vf->vf_id); + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); + dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", + repr->id); } } @@ -353,13 +336,13 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->vf->vf_id; + skb->queue_mapping = repr->q_id; return ice_start_xmit(skb, netdev); } /** - * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor * @skb: pointer to send buffer * @off: pointer to offload struct */ @@ -382,7 +365,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, } /** - * ice_eswitch_release_env - clear switchdev HW filters + * ice_eswitch_release_env - clear 
eswitch HW filters * @pf: pointer to PF struct * * This function removes HW filters configuration specific for switchdev @@ -390,8 +373,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, */ static void ice_eswitch_release_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); @@ -407,7 +390,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) } /** - * ice_eswitch_vsi_setup - configure switchdev control VSI + * ice_eswitch_vsi_setup - configure eswitch control VSI * @pf: pointer to PF structure * @pi: pointer to port_info structure */ @@ -424,48 +407,29 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** - * ice_eswitch_napi_del - remove NAPI handle for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_del(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - netif_napi_del(&vf->repr->q_vector->napi); -} - -/** * ice_eswitch_napi_enable - enable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_enable(struct ice_pf *pf) +static void ice_eswitch_napi_enable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_enable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_enable(&repr->q_vector->napi); } /** * ice_eswitch_napi_disable - disable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_disable(struct ice_pf *pf) +static void ice_eswitch_napi_disable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_disable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_disable(&repr->q_vector->napi); } /** @@ -486,39 +450,26 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->switchdev.control_vsi) + pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->eswitch.control_vsi) return -ENODEV; - ctrl_vsi = pf->switchdev.control_vsi; - pf->switchdev.uplink_vsi = uplink_vsi; + ctrl_vsi = pf->eswitch.control_vsi; + /* cp VSI is created with 1 queue by default */ + pf->eswitch.qs.value = 1; + pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) goto err_vsi; - if (ice_repr_add_for_all_vfs(pf)) - goto err_repr_add; - - if (ice_eswitch_setup_reprs(pf)) - goto err_setup_reprs; - - ice_eswitch_remap_rings_to_vectors(pf); - - if (ice_vsi_open(ctrl_vsi)) - goto err_setup_reprs; - if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; - ice_eswitch_napi_enable(pf); + pf->eswitch.is_running = true; return 0; err_br_offloads: - ice_vsi_close(ctrl_vsi); -err_setup_reprs: - ice_repr_rem_from_all_vfs(pf); -err_repr_add: ice_eswitch_release_env(pf); err_vsi: ice_vsi_release(ctrl_vsi); @@ -526,19 +477,19 @@ err_vsi: } /** - * ice_eswitch_disable_switchdev - disable
switchdev resources + * ice_eswitch_disable_switchdev - disable eswitch resources * @pf: pointer to PF structure */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_napi_disable(pf); ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_eswitch_release_reprs(pf, ctrl_vsi); ice_vsi_release(ctrl_vsi); - ice_repr_rem_from_all_vfs(pf); + + pf->eswitch.is_running = false; + pf->eswitch.qs.is_reaching = false; } /** @@ -566,6 +517,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, case DEVLINK_ESWITCH_MODE_LEGACY: dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id); + xa_destroy(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: @@ -578,6 +530,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); + xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -616,74 +569,168 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) } /** - * ice_eswitch_release - cleanup eswitch + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors * @pf: pointer to PF structure */ -void ice_eswitch_release(struct ice_pf *pf) +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + struct ice_repr *repr; + unsigned long id; + + if (test_bit(ICE_DOWN, pf->state)) return; - ice_eswitch_disable_switchdev(pf); - pf->switchdev.is_running = false; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_start_tx_queues(repr); } /** - * ice_eswitch_configure - configure eswitch + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors * @pf: pointer to PF structure */ -int ice_eswitch_configure(struct ice_pf *pf) +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { - int status; - - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) - return 0; + struct ice_repr *repr; + unsigned long id; - status = ice_eswitch_enable_switchdev(pf); - if (status) - return status; + if (test_bit(ICE_DOWN, pf->state)) + return; - pf->switchdev.is_running = true; - return 0; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_stop_tx_queues(repr); } -/** - * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - struct ice_vf *vf; - unsigned int bkt; + ice_eswitch_del_sp_rules(pf); + ice_eswitch_stop_all_tx_queues(pf); + ice_eswitch_napi_disable(&pf->eswitch.reprs); +} - lockdep_assert_held(&pf->vfs.table_lock); +static void ice_eswitch_start_reprs(struct ice_pf *pf) +{ + ice_eswitch_napi_enable(&pf->eswitch.reprs); + ice_eswitch_start_all_tx_queues(pf); + ice_eswitch_add_sp_rules(pf); +} - if (test_bit(ICE_DOWN, pf->state)) - return; +static void +ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) +{ + struct ice_vsi *cp = eswitch->control_vsi; + int queues = 0; + + if (eswitch->qs.is_reaching) { + if (eswitch->qs.to_reach >= eswitch->qs.value + change) { + queues = eswitch->qs.to_reach; + eswitch->qs.is_reaching = false; + } else { + queues = 0; + } + } else if ((change > 0 
&& cp->alloc_txq <= eswitch->qs.value) || + change < 0) { + queues = cp->alloc_txq + change; + } - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_start_tx_queues(vf->repr); + if (queues) { + cp->req_txq = queues; + cp->req_rxq = queues; + ice_vsi_close(cp); + ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); + ice_vsi_open(cp); + } else if (!change) { + /* change == 0 means that VSI wasn't open, open it here */ + ice_vsi_open(cp); } + + eswitch->qs.value += change; + ice_eswitch_remap_rings_to_vectors(eswitch); } -/** - * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors - * @pf: pointer to PF structure - */ -void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + int change = 1; + int err; - lockdep_assert_held(&pf->vfs.table_lock); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return 0; - if (test_bit(ICE_DOWN, pf->state)) + if (xa_empty(&pf->eswitch.reprs)) { + err = ice_eswitch_enable_switchdev(pf); + if (err) + return err; + /* Control plane VSI is created with 1 queue as default */ + pf->eswitch.qs.to_reach -= 1; + change = 0; + } + + ice_eswitch_stop_reprs(pf); + + repr = ice_repr_add_vf(vf); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_create_repr; + } + + err = ice_eswitch_setup_repr(pf, repr); + if (err) + goto err_setup_repr; + + err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, + XA_LIMIT(1, INT_MAX), GFP_KERNEL); + if (err) + goto err_xa_alloc; + + vf->repr_id = repr->id; + + ice_eswitch_cp_change_queues(&pf->eswitch, change); + ice_eswitch_start_reprs(pf); + + return 0; + +err_xa_alloc: + ice_eswitch_release_repr(pf, repr); +err_setup_repr: + ice_repr_rem_vf(repr); +err_create_repr: + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + ice_eswitch_start_reprs(pf); + + return err; +} + +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); + + if (!repr) return; - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_stop_tx_queues(vf->repr); + ice_eswitch_stop_reprs(pf); + xa_erase(&pf->eswitch.reprs, repr->id); + + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + else + ice_eswitch_cp_change_queues(&pf->eswitch, -1); + + ice_eswitch_release_repr(pf, repr); + ice_repr_rem_vf(repr); + + if (xa_empty(&pf->eswitch.reprs)) { + /* since all port representors are destroyed, there is + * no point in keeping the nodes + */ + ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); + devl_lock(devlink); + devl_rate_nodes_destroy(devlink); + devl_unlock(devlink); + } else { + ice_eswitch_start_reprs(pf); } } @@ -693,30 +740,35 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) */ int ice_eswitch_rebuild(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int status; - - ice_eswitch_napi_disable(pf); - ice_eswitch_napi_del(pf); - - status = ice_eswitch_setup_env(pf); - if (status) - return status; + struct ice_repr *repr; + unsigned long id; + int err; - status = ice_eswitch_setup_reprs(pf); - if (status) - return status; + if (!ice_is_switchdev_running(pf)) + return 0; - ice_eswitch_remap_rings_to_vectors(pf); + err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); + if (err) + return err; - ice_replay_tc_fltrs(pf); + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_eswitch_detach(pf, 
repr->vf); - status = ice_vsi_open(ctrl_vsi); - if (status) - return status; + return 0; +} - ice_eswitch_napi_enable(pf); - ice_eswitch_start_all_tx_queues(pf); +/** + * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues + * @pf: pointer to PF structure + * @change: how many more (or fewer) queues are needed + * + * Remember to call ice_eswitch_attach/detach() "change" number of times. + */ +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +{ + if (pf->eswitch.qs.value + change < 0) + return; - return 0; + pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; + pf->eswitch.qs.is_reaching = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index b18bf83a2f5b..1a288a03a79a 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -7,8 +7,9 @@ #include <net/devlink.h> #ifdef CONFIG_ICE_SWITCHDEV -void ice_eswitch_release(struct ice_pf *pf); -int ice_eswitch_configure(struct ice_pf *pf); +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); @@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); #else /* CONFIG_ICE_SWITCHDEV */ -static inline void ice_eswitch_release(struct ice_pf *pf) { } +static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } + +static inline int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } @@ -34,7 +42,8 @@ static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } -static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } +static inline void +ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { @@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { return NETDEV_TX_BUSY; } + +static inline void +ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index 6ae0269bdf73..ac5beecd028b 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); } - if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) + if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; - else if (vsi->vf && vsi->vf->repr) - vsi->vf->repr->br_port = NULL; + } else { + struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + + if (repr)
+ repr->br_port = NULL; + } xa_erase(&bridge->ports, br_port->vsi_idx); ice_eswitch_br_port_vlans_flush(br_port); @@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, static int ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf) { - struct ice_vsi *vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *vsi = pf->eswitch.uplink_vsi; struct ice_esw_br_port *br_port; int err; @@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb, static void ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) { - struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads; + struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads; ASSERT_RTNL(); @@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) ice_eswitch_br_deinit(br_offloads, br_offloads->bridge); - pf->switchdev.br_offloads = NULL; + pf->eswitch.br_offloads = NULL; kfree(br_offloads); } @@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf) ASSERT_RTNL(); - if (pf->switchdev.br_offloads) + if (pf->eswitch.br_offloads) return ERR_PTR(-EEXIST); br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL); if (!br_offloads) return ERR_PTR(-ENOMEM); - pf->switchdev.br_offloads = br_offloads; + pf->eswitch.br_offloads = br_offloads; br_offloads->pf = pf; return br_offloads; @@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf) { struct ice_esw_br_offloads *br_offloads; - br_offloads = pf->switchdev.br_offloads; + br_offloads = pf->eswitch.br_offloads; if (!br_offloads) return; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index bde9bc74f928..2244d41fd933 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1142,8 +1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ICE_VSI_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_vsi_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string); if (ice_is_port_repr_netdev(netdev)) return; @@ -1162,8 +1161,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, return; for (i = 0; i < ICE_PF_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_pf_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i); @@ -1179,8 +1177,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_priv_flags[i].name); + ethtool_puts(&p, ice_gstrings_priv_flags[i].name); break; default: break; @@ -2505,27 +2502,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) return hdrs; } -#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) -#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) -#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) -#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) -#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) -#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) -#define 
ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) - /** * ice_parse_hash_flds - parses hash fields from RSS hash input * @nfc: ethtool rxnfc command + * @symm: true if Symmetric Toeplitz is set * * This function parses the rxnfc command and returns intended * hash fields for RSS configuration */ -static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc) +static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm) { u64 hfld = ICE_HASH_INVALID; @@ -2594,9 +2579,11 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; + struct ice_rss_hash_cfg cfg; struct device *dev; u64 hashed_flds; int status; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2606,7 +2593,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - hashed_flds = ice_parse_hash_flds(nfc); + symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + hashed_flds = ice_parse_hash_flds(nfc, symm); if (hashed_flds == ICE_HASH_INVALID) { dev_dbg(dev, "Invalid hash fields, vsi num = %d\n", vsi->vsi_num); @@ -2620,7 +2608,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); + cfg.hash_flds = hashed_flds; + cfg.addl_hdrs = hdrs; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; + cfg.symm = symm; + + status = ice_add_rss_cfg(&pf->hw, vsi, &cfg); if (status) { dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", vsi->vsi_num, status); @@ -2641,6 +2634,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) struct ice_pf *pf = vsi->back; struct device *dev; u64 hash_flds; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2659,7 +2653,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return; } - hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs); + hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm); if (hash_flds == ICE_HASH_INVALID) { dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n", vsi->vsi_num); @@ -3198,11 +3192,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev) return np->vsi->rss_table_size; } +/** + * ice_get_rxfh - get the Rx flow hash indirection table + * @netdev: network interface device structure + * @rxfh: pointer to param struct (indir, key, hfunc) + * + * Reads the indirection table directly from the hardware.
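[Editor's note] The converted ethtool callbacks below receive a single struct ethtool_rxfh_param in place of the old separate indir/key/hfunc arguments. A minimal get-side sketch under the new API; the example_* helpers are hypothetical stand-ins for driver-specific reads:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static bool example_is_symmetric(struct net_device *netdev);
	static void example_read_key(struct net_device *netdev, u8 *key);
	static void example_read_indir(struct net_device *netdev, u32 *indir);

	static int example_get_rxfh(struct net_device *netdev,
				    struct ethtool_rxfh_param *rxfh)
	{
		rxfh->hfunc = ETH_RSS_HASH_TOP;	/* plain Toeplitz */
		if (example_is_symmetric(netdev))
			rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
		if (rxfh->key)
			example_read_key(netdev, rxfh->key);
		if (rxfh->indir)
			example_read_indir(netdev, rxfh->indir);
		return 0;
	}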
+ */ static int -ice_get_rxfh_context(struct net_device *netdev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) +ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { struct ice_netdev_priv *np = netdev_priv(netdev); + u32 rss_context = rxfh->rss_context; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; u16 qcount, offset; @@ -3233,17 +3234,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, vsi = vsi->tc_map_vsi[rss_context]; } - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; - if (!indir) + if (!rxfh->indir) return 0; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; - err = ice_get_rss_key(vsi, key); + err = ice_get_rss_key(vsi, rxfh->key); if (err) goto out; @@ -3253,12 +3255,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, if (ice_is_adq_active(pf)) { for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = offset + lut[i] % qcount; + rxfh->indir[i] = offset + lut[i] % qcount; goto out; } for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = lut[i]; + rxfh->indir[i] = lut[i]; out: kfree(lut); @@ -3266,42 +3268,31 @@ out: } /** - * ice_get_rxfh - get the Rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function - * - * Reads the indirection table directly from the hardware. - */ -static int -ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) -{ - return ice_get_rxfh_context(netdev, indir, key, hfunc, 0); -} - -/** * ice_set_rxfh - set the Rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue ID, otherwise * returns 0 after programming the table. 
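[Editor's note] The matching set side validates the request before touching hardware; a minimal sketch under the same assumptions (and hypothetical example_write_* helpers) as the get-side example above:

	static void example_write_key(struct net_device *netdev, const u8 *key);
	static void example_write_indir(struct net_device *netdev, const u32 *indir);

	static int example_set_rxfh(struct net_device *netdev,
				    struct ethtool_rxfh_param *rxfh,
				    struct netlink_ext_ack *extack)
	{
		/* only plain Toeplitz is accepted in this sketch */
		if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
		    rxfh->hfunc != ETH_RSS_HASH_TOP)
			return -EOPNOTSUPP;

		if (rxfh->key)
			example_write_key(netdev, rxfh->key);
		if (rxfh->indir)
			example_write_indir(netdev, rxfh->indir);
		return 0;
	}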
*/ static int -ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, - const u8 hfunc) +ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct device *dev; int err; dev = ice_pf_to_dev(pf); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (rxfh->rss_context) return -EOPNOTSUPP; if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { @@ -3315,7 +3306,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return -EOPNOTSUPP; } - if (key) { + /* Update the VSI's hash function */ + if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + err = ice_set_rss_hfunc(vsi, hfunc); + if (err) + return err; + + if (rxfh->key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE, @@ -3323,7 +3322,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, if (!vsi->rss_hkey_user) return -ENOMEM; } - memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); + memcpy(vsi->rss_hkey_user, rxfh->key, + ICE_VSIQF_HKEY_ARRAY_SIZE); err = ice_set_rss_key(vsi, vsi->rss_hkey_user); if (err) @@ -3338,11 +3338,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - if (indir) { + if (rxfh->indir) { int i; for (i = 0; i < vsi->rss_table_size; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); } else { ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size, vsi->rss_size); @@ -4220,9 +4220,11 @@ ice_get_module_eeprom(struct net_device *netdev, } static const struct ethtool_ops ice_ethtool_ops = { + .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, + .cap_rss_sym_xor_supported = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, @@ -4253,7 +4255,6 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_pauseparam = ice_set_pauseparam, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, - .get_rxfh_context = ice_get_rxfh_context, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, .get_channels = ice_get_channels, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index d151e5bacfec..54e4536219aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) continue; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; - - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + u64 prof_id = prof->prof_id[tun]; for (i = 0; i < prof->cnt; i++) { if (prof->vsi_h[i] != vsi_idx) @@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow) return; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; + u64 prof_id = prof->prof_id[tun]; int j; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; for (j = 0; j < prof->cnt; j++) { u16 vsi_num; @@ -439,14 +436,12 @@ void 
ice_fdir_replay_flows(struct ice_hw *hw) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { struct ice_flow_prof *hw_prof; struct ice_fd_hw_prof *prof; - u64 prof_id; int j; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, + ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof->fdir_seg[tun], TNL_SEG_CNT(tun), - &hw_prof); + false, &hw_prof); for (j = 0; j < prof->cnt; j++) { enum ice_flow_priority prio; u64 entry_h = 0; @@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) prio = ICE_FLOW_PRIO_NORMAL; err = ice_flow_add_entry(hw, ICE_BLK_FD, - prof_id, + hw_prof->id, prof->vsi_h[0], prof->vsi_h[j], prio, prof->fdir_seg, @@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) flow); continue; } + prof->prof_id[tun] = hw_prof->id; prof->entry_h[j][tun] = entry_h; } } @@ -638,7 +634,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, u64 entry1_h = 0; u64 entry2_h = 0; bool del_last; - u64 prof_id; int err; int idx; @@ -668,7 +663,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * then return error. */ if (hw->fdir_fltr_cnt[flow]) { - dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return -EINVAL; } @@ -686,23 +681,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * That is the final parameters are 1 header (segment), no * actions (NULL) and zero actions 0. */ - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + TNL_SEG_CNT(tun), false, &prof); if (err) return err; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, main_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (err) goto err_prof; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (err) goto err_entry; hw_prof->fdir_seg[tun] = seg; + hw_prof->prof_id[tun] = prof->id; hw_prof->entry_h[0][tun] = entry1_h; hw_prof->entry_h[1][tun] = entry2_h; hw_prof->vsi_h[0] = main_vsi->idx; @@ -719,7 +714,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, entry1_h = 0; vsi_h = main_vsi->tc_map_vsi[idx]->idx; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, vsi_h, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); @@ -756,7 +751,7 @@ err_unroll: if (!hw_prof->entry_h[idx][tun]) continue; - ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]); hw_prof->entry_h[idx][tun] = 0; if (del_last) @@ -766,11 +761,11 @@ err_unroll: hw_prof->cnt = 0; err_entry: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); - dev_err(dev, "Failed to add filter. 
Flow director filters on each port must have the same input set.\n"); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return err; } @@ -1853,6 +1848,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) struct ice_pf *pf; struct ice_hw *hw; int fltrs_needed; + u32 max_location; u16 tunnel_port; int ret; @@ -1884,8 +1880,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) if (ret) return ret; - if (fsp->location >= ice_get_fdir_cnt_all(hw)) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + max_location = ice_get_fdir_cnt_all(hw); + if (fsp->location >= max_location) { + dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n", + max_location); return -ENOSPC; } @@ -1893,7 +1891,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); return -ENOSPC; } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5ce413965930..85c57ec315c4 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @blk: HW block * @fv: field vector to search for * @masks: masks for FV + * @symm: symmetric setting for RSS flows * @prof_id: receives the profile ID */ static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, - struct ice_fv_word *fv, u16 *masks, u8 *prof_id) + struct ice_fv_word *fv, u16 *masks, bool symm, + u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; u8 i; @@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; + if (blk == ICE_BLK_RSS && es->symm[i] != symm) + continue; + if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; @@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, } /** - * ice_write_es - write an extraction sequence to hardware + * ice_write_es - write an extraction sequence and symmetric setting to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence * @prof_id: the profile ID to write * @fv: pointer to the extraction sequence to write - NULL to clear extraction + * @symm: symmetric setting for RSS profiles */ static void ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, - struct ice_fv_word *fv) + struct ice_fv_word *fv, bool symm) { u16 off; @@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * sizeof(*fv)); } + + if (blk == ICE_BLK_RSS) + hw->blk[blk].es.symm[prof_id] = symm; } /** @@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { - 
ice_write_es(hw, blk, prof_id, NULL); + ice_write_es(hw, blk, prof_id, NULL, false); ice_free_prof_masks(hw, blk, prof_id); return ice_free_prof_id(hw, blk, prof_id); } @@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id); } list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { @@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->symm, 0, es->count * sizeof(*es->symm)); memset(es->written, 0, es->count * sizeof(*es->written)); memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); + + memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id)); } } @@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw) ice_init_all_prof_masks(hw); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw) if (!es->ref_count) goto err; + es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->symm), GFP_KERNEL); + if (!es->symm) + goto err; + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); if (!es->written) @@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw) sizeof(*es->mask_ena), GFP_KERNEL); if (!es->mask_ena) goto err; + + prof_id->count = blk_sizes[i].prof_id; + prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count, + sizeof(*prof_id->id), GFP_KERNEL); + if (!prof_id->id) + goto err; } return 0; @@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * @attr_cnt: number of elements in attr array * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence + * @symm: symmetric setting for RSS profiles * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. 
While the hardware profile is allocated @@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks) + struct ice_fv_word *es, u16 *masks, bool symm) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], mutex_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ - status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); + status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); @@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], goto err_ice_add_prof; /* and write new es */ - ice_write_es(hw, blk, prof_id, es); + ice_write_es(hw, blk, prof_id, es, symm); } ice_prof_inc_ref(hw, blk, prof_id); @@ -3097,7 +3125,7 @@ err_ice_add_prof: * This will search for a profile tracking ID which was previously added. * The profile map lock should be held before calling this function. */ -static struct ice_prof_map * +struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 7af7c8e9aa4e..b39d7cdc381f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks); + struct ice_fv_word *es, u16 *masks, bool symm); +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); int diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 4f42e14ed3ae..d427a79d001a 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -146,6 +146,7 @@ struct ice_es { u32 *mask_ena; struct list_head prof_map; struct ice_fv_word *t; + u8 *symm; /* symmetric setting per profile (RSS blk)*/ struct mutex prof_map_lock; /* protect access to profiles list */ u8 *written; u8 reverse; /* set to true to reverse FV order */ @@ -304,10 +305,16 @@ struct ice_masks { struct ice_mask masks[ICE_PROF_MASK_COUNT]; }; +struct ice_prof_id { + unsigned long *id; + int count; +}; + /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; struct ice_xlt2 xlt2; + struct ice_prof_id prof_id; struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index fb8b925aaf8b..fc2b58f56279 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 +#define 
ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008 /** * ice_flow_find_prof_conds - Find a profile matching headers and conditions @@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) */ static struct ice_flow_prof * ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, - u8 segs_cnt, u16 vsi_handle, u32 conds) + u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds) { struct ice_flow_prof *p, *prof = NULL; @@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, !test_bit(vsi_handle, p->vsis)) continue; + /* Check for symmetric settings */ + if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) && + p->symm != symm) + continue; + /* Protocol headers must be checked. Matched fields are * checked if specified. */ @@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added * * Assumption: the caller has acquired the lock to the profile list */ static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, - enum ice_flow_dir dir, u64 prof_id, + enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + bool symm, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; + struct ice_prof_id *ids; int status; + u64 prof_id; u8 i; if (!prof) return -EINVAL; + ids = &hw->blk[blk].prof_id; + prof_id = find_first_zero_bit(ids->id, ids->count); + if (prof_id >= ids->count) + return -ENOSPC; + params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; @@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, params->prof->id = prof_id; params->prof->dir = dir; params->prof->segs_cnt = segs_cnt; + params->prof->symm = symm; /* Make a copy of the segments that need to be persistent in the flow * profile instance @@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask); + params->mask, symm); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, INIT_LIST_HEAD(¶ms->prof->entries); mutex_init(¶ms->prof->entries_lock); + set_bit(prof_id, ids->id); *prof = params->prof; out: @@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { + clear_bit(prof->id, hw->blk[blk].prof_id.id); list_del(&prof->l_entry); mutex_destroy(&prof->entries_lock); devm_kfree(ice_hw_to_dev(hw), prof); @@ -1511,15 +1528,15 @@ 
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added */ int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof) { int status; @@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, mutex_lock(&hw->fl_profs_locks[blk]); - status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, - prof); + status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt, + symm, prof); if (!status) list_add(&(*prof)->l_entry, &hw->fl_profs[blk]); @@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id) /** * ice_flow_set_rss_seg_info - setup packet segments for RSS * @segs: pointer to the flow field segment(s) - * @hash_fields: fields to be hashed on for the segment(s) - * @flow_hdr: protocol header fields within a packet segment + * @seg_cnt: segment count + * @cfg: configure parameters * * Helper function to extract fields from hash bitmap and use flow * header value to set flow field segment for further use in flow * profile entry or removal. */ static int -ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, - u32 flow_hdr) +ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, + const struct ice_rss_hash_cfg *cfg) { + struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; - for_each_set_bit(i, (unsigned long *)&hash_fields, - ICE_FLOW_FIELD_IDX_MAX) - ice_flow_set_fld(segs, (enum ice_flow_field)i, + /* set inner most segment */ + seg = &segs[seg_cnt - 1]; + + for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds, + (u16)ICE_FLOW_FIELD_IDX_MAX) + ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); - ICE_FLOW_SET_HDRS(segs, flow_hdr); + ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs); + + /* set outer most header */ + if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; - if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) return -EINVAL; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) return -EIO; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) return -EIO; @@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) } /** + * ice_get_rss_hdr_type - get a RSS profile's header type + * @prof: RSS flow profile + */ +static enum ice_rss_cfg_hdr_type +ice_get_rss_hdr_type(struct ice_flow_prof *prof) +{ + if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) { + return 
ICE_RSS_OUTER_HEADERS; + } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) { + const struct ice_flow_seg_info *s; + + s = &prof->segs[ICE_RSS_OUTER_HEADERS]; + if (s->hdrs == ICE_FLOW_SEG_HDR_NONE) + return ICE_RSS_INNER_HEADERS; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; + } + + return ICE_RSS_ANY_HEADERS; +} + +static bool +ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof, + enum ice_rss_cfg_hdr_type hdr_type) +{ + return (r->hash.hdr_type == hdr_type && + r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && + r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs); +} + +/** * ice_rem_rss_list - remove RSS configuration from list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle @@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) static void ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *tmp; /* Search for RSS hash fields associated to the VSI that match the * hash configurations associated to the flow profile. If found * remove from the RSS entry list of the VSI context and delete entry. */ + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { clear_bit(vsi_handle, r->vsis); if (bitmap_empty(r->vsis, ICE_MAX_VSI)) { list_del(&r->l_entry); @@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *rss_cfg; + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry(r, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { set_bit(vsi_handle, r->vsis); return 0; } @@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) if (!rss_cfg) return -ENOMEM; - rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; - rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; + rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hdr_type = hdr_type; + rss_cfg->hash.symm = prof->symm; set_bit(vsi_handle, rss_cfg->vsis); list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head); @@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) return 0; } -#define ICE_FLOW_PROF_HASH_S 0 -#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) -#define ICE_FLOW_PROF_HDR_S 32 -#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) -#define ICE_FLOW_PROF_ENCAP_S 63 -#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S)) +/** + * ice_rss_config_xor_word - set the HSYMM registers for one input set word + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * + * Write to the HSYMM register with the 
index of @src FV the value of the @dst + * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst] + * while calculating the RSS input set. + */ +static void +ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst) +{ + u32 val, reg, bits_shift; + u8 reg_idx; + + reg_idx = src / GLQF_HSYMM_REG_SIZE; + bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3); + val = dst | GLQF_HSYMM_ENABLE_BIT; + + reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx)); + reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift); + wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg); +} -#define ICE_RSS_OUTER_HEADERS 1 -#define ICE_RSS_INNER_HEADERS 2 +/** + * ice_rss_config_xor - set the symmetric registers for a profile's protocol + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * @len: length of the source/destination fields in words + */ +static void +ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len) +{ + int fv_last_word = + ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1; + int i; + + for (i = 0; i < len; i++) { + ice_rss_config_xor_word(hw, prof_id, + /* Yes, field vector in GLQF_HSYMM and + * GLQF_HINSET is inversed! + */ + fv_last_word - (src + i), + fv_last_word - (dst + i)); + ice_rss_config_xor_word(hw, prof_id, + fv_last_word - (dst + i), + fv_last_word - (src + i)); + } +} -/* Flow profile ID format: - * [0:31] - Packet match fields - * [32:62] - Protocol header - * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled +/** + * ice_rss_set_symm - set the symmetric settings for an RSS profile + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * + * The symmetric hash will result from XORing the protocol's fields with + * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's + * GLQF_HSYMM registers. */ -#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \ - ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - ((u8)((segs_cnt) - 1) ? 
ICE_FLOW_PROF_ENCAP_M : 0))) +static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + + if (!map) + return; + + /* clear to default */ + for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + + if (prof->symm) { + struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst; + struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst; + struct ice_flow_seg_xtrct *sctp_src, *sctp_dst; + struct ice_flow_seg_xtrct *tcp_src, *tcp_dst; + struct ice_flow_seg_xtrct *udp_src, *udp_dst; + struct ice_flow_seg_info *seg; + + seg = &prof->segs[prof->segs_cnt - 1]; + + ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct; + ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct; + + ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct; + ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct; + + tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct; + tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct; + + udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct; + udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct; + + sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct; + sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct; + + /* xor IPv4 */ + if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv4_src->idx, ipv4_dst->idx, 2); + + /* xor IPv6 */ + if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv6_src->idx, ipv6_dst->idx, 8); + + /* xor TCP */ + if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + tcp_src->idx, tcp_dst->idx, 1); + + /* xor UDP */ + if (udp_src->prot_id != 0 && udp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + udp_src->idx, udp_dst->idx, 1); + + /* xor SCTP */ + if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + sctp_src->idx, sctp_dst->idx, 1); + } +} /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; + u8 segs_cnt; int status; - if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return -EINVAL; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto exit; - /* Search for a flow profile that has matching headers, hash fields - * and has the input VSI associated to it. 
If found, no further + /* Search for a flow profile that has matching headers, hash fields, + * symm and has the input VSI associated to it. If found, no further * operations required and exit. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS | + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) goto exit; @@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * the protocol header and new hash field configuration. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) { status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (!status) @@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, } } - /* Search for a profile that has same match fields only. If this - * exists then associate the VSI to this profile. + /* Search for a profile that has the same match fields and symmetric + * setting. If this exists then associate the VSI to this profile. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_FLDS); if (prof) { status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); @@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, goto exit; } - /* Create a new flow profile with generated profile and packet - * segment information. - */ + /* Create a new flow profile with packet segment information. */ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, - ICE_FLOW_GEN_PROFID(hashed_flds, - segs[segs_cnt - 1].hdrs, - segs_cnt), - segs, segs_cnt, &prof); + segs, segs_cnt, cfg->symm, &prof); if (status) goto exit; + prof->symm = cfg->symm; + ice_rss_set_symm(hw, prof); status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); /* If association to a new flow profile failed then this profile can * be removed. @@ -2146,30 +2323,43 @@ exit: /** * ice_add_rss_cfg - add an RSS configuration with specified hashed fields * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields + * @vsi: VSI to add the RSS configuration to + * @cfg: configure parameters * * This function will generate a flow profile based on fields associated with * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
*/ int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; + u16 vsi_handle; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * ice_rem_rss_cfg_sync - remove an existing RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; + u8 segs_cnt; int status; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto out; prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { status = -ENOENT; @@ -2233,31 +2423,39 @@ out: * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment + * @cfg: configure parameters * * This function will lookup the flow profile based on the input * hash field bitmap, iterate through the profile entry list of * that profile and find entry associated with input VSI to be - * removed. Calls are made to underlying flow s which will APIs + * removed. Calls are made to underlying flow apis which will in * turn build or update buffers for RSS XLT1 section. 
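+ * + * Note: the cfg passed here must describe the configuration exactly as + * it was added (same addl_hdrs, hash_flds and hdr_type), since the flow + * profile to remove is looked up by those fields.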
*/ -int __maybe_unused -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +int +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle + * @vsi: VF's VSI * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash) { + struct ice_rss_hash_cfg hcfg; + u16 vsi_handle; int status = 0; u64 hash_flds; + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) return -EINVAL; @@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) if (rss_hash == ICE_HASH_INVALID) return -EIO; - status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, - ICE_FLOW_SEG_HDR_NONE); + hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + hcfg.hash_flds = rss_hash; + hcfg.hdr_type = ICE_RSS_ANY_HEADERS; + hcfg.symm = false; + status = ice_add_rss_cfg(hw, vsi, &hcfg); if (status) break; } @@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) return status; } +static bool rss_cfg_symm_valid(u64 hfld) +{ + return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT))); +} + +/** + * ice_set_rss_cfg_symm - set symmetry for all VSI's RSS configurations + * @hw: pointer to the hardware structure + * @vsi: VSI to set/unset Symmetric RSS + * @symm: TRUE to set Symmetric RSS hashing + */ +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm) +{ + struct ice_rss_hash_cfg local; + struct ice_rss_cfg *r, *tmp; + u16 vsi_handle = vsi->idx; + int status = 0; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; +
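+ /* Re-add each of this VSI's RSS configurations whose symmetry differs + * from the requested @symm; when enabling, configurations whose hash + * fields are not paired src/dst (see rss_cfg_symm_valid() above) are + * skipped and keep their current hash. + */ + 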
mutex_lock(&hw->rss_locks); + list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) { + if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) { + local = r->hash; + local.symm = symm; + if (symm && !rss_cfg_symm_valid(r->hash.hash_flds)) + continue; + + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local); + if (status) + break; + } + } + mutex_unlock(&hw->rss_locks); + + return status; +} + /** * ice_replay_rss_cfg - replay RSS configurations associated with VSI * @hw: pointer to the hardware structure @@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { if (test_bit(vsi_handle, r->vsis)) { - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_OUTER_HEADERS); - if (status) - break; - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_INNER_HEADERS); + status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash); if (status) break; } @@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @hdrs: protocol header type + * @symm: whether the RSS is symmetric (bool, output) * * This function will return the match fields of the first instance of flow * profile having the given header types and containing input VSI */ -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm) { u64 rss_hash = ICE_HASH_INVALID; struct ice_rss_cfg *r; @@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && - r->packet_hdr == hdrs) { - rss_hash = r->hashed_flds; + r->hash.addl_hdrs == hdrs) { + rss_hash = r->hash.hash_flds; + *symm = r->hash.symm; break; } mutex_unlock(&hw->rss_locks); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 96923ef0a5a8..ff82915ab497 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -34,6 +34,8 @@ #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_FLOW_HASH_GTP_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) @@ -227,6 +229,19 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_MAX }; +#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) +#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) +#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) +#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) +#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) +#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ + 
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) + /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ @@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field { BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) +enum ice_rss_cfg_hdr_type { + ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ + ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */ + /* take inner headers as inputset for packet with outer ipv4. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, + /* take inner headers as inputset for packet with outer ipv6. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, + /* take outer headers first then inner headers as inputset */ + ICE_RSS_ANY_HEADERS +}; + +struct ice_rss_hash_cfg { + u32 addl_hdrs; /* protocol header fields */ + u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ + enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */ + bool symm; /* symmetric or asymmetric hash */ +}; + enum ice_flow_dir { ICE_FLOW_RX = 0x02, }; @@ -289,8 +322,10 @@ enum ice_flow_priority { ICE_FLOW_PRIO_HIGH }; +#define ICE_FLOW_SEG_SINGLE 1 #define ICE_FLOW_SEG_MAX 2 #define ICE_FLOW_SEG_RAW_FLD_MAX 2 +#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) @@ -372,20 +407,21 @@ struct ice_flow_prof { /* software VSI handles referenced by this flow profile */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); + + bool symm; /* Symmetric Hash for RSS */ }; struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); - u64 hashed_flds; - u32 packet_hdr; + struct ice_rss_hash_cfg hash; }; int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof); + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, @@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm); +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + u64 hashed_flds); int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -int -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); +int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg); +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg); +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c new file mode 100644 index 000000000000..92b5dac481cd --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c @@ -0,0 +1,470 
@@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include <linux/vmalloc.h> +#include "ice.h" +#include "ice_common.h" +#include "ice_fwlog.h" + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings) +{ + u16 head, tail; + + head = rings->head; + tail = rings->tail; + + if (head < tail && (tail - head == (rings->size - 1))) + return true; + else if (head > tail && (tail == (head - 1))) + return true; + + return false; +} + +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings) +{ + return rings->head == rings->tail; +} + +void ice_fwlog_ring_increment(u16 *item, u16 size) +{ + *item = (*item + 1) & (size - 1); +} + +static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i, nr_bytes; + u8 *mem; + + nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN; + mem = vzalloc(nr_bytes); + if (!mem) + return -ENOMEM; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + ring->data_size = ICE_AQ_MAX_BUF_LEN; + ring->data = mem; + mem += ICE_AQ_MAX_BUF_LEN; + } + + return 0; +} + +static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + /* the first ring is the base memory for the whole range so + * free it + */ + if (!i) + vfree(ring->data); + + ring->data = NULL; + ring->data_size = 0; + } +} + +#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n)) +/** + * ice_fwlog_realloc_rings - reallocate the FW log rings + * @hw: pointer to the HW structure + * @index: the new index to use to allocate memory for the log data + * + */ +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index) +{ + struct ice_fwlog_ring ring; + int status, ring_size; + + /* convert the number of bytes into a number of 4K buffers. externally + * the driver presents the interface to the FW log data as a number of + * bytes because that's easy for users to understand. internally the + * driver uses a ring of buffers because the driver doesn't know where + * the beginning and end of any line of log data is so the driver has + * to overwrite data as complete blocks. when the data is returned to + * the user the driver knows that the data is correct and the FW log + * can be correctly parsed by the tools + */ + ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN; + if (ring_size == hw->fwlog_ring.size) + return; + + /* allocate space for the new rings and buffers then release the + * old rings and buffers. that way if we don't have enough + * memory then we at least have what we had before + */ + ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL); + if (!ring.rings) + return; + + ring.size = ring_size; + + status = ice_fwlog_alloc_ring_buffs(&ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&ring); + kfree(ring.rings); + return; + } + + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + + hw->fwlog_ring.rings = ring.rings; + hw->fwlog_ring.size = ring.size; + hw->fwlog_ring.index = index; + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; +} + +/** + * ice_fwlog_init - Initialize FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called on driver initialization during + * ice_init_hw(). 
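+ * + * The default ring sizing follows from the constants involved: index + * ICE_FWLOG_RING_SIZE_INDEX_DFLT (3) selects (128 KiB << 3) = 1 MiB of + * log data, carved into ICE_AQ_MAX_BUF_LEN (4 KiB) buffers, which is + * where the ICE_FWLOG_RING_SIZE_DFLT of 256 ring entries comes from.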
+ */ +int ice_fwlog_init(struct ice_hw *hw) +{ + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return -EINVAL; + + ice_fwlog_set_supported(hw); + + if (ice_fwlog_supported(hw)) { + int status; + + /* read the current config from the FW and store it */ + status = ice_fwlog_get(hw, &hw->fwlog_cfg); + if (status) + return status; + + hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT, + sizeof(*hw->fwlog_ring.rings), + GFP_KERNEL); + if (!hw->fwlog_ring.rings) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n"); + return -ENOMEM; + } + + hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT; + hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT; + + status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + return status; + } + + ice_debugfs_fwlog_init(hw->back); + } else { + dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n"); + } + + return 0; +} + +/** + * ice_fwlog_deinit - unroll FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called in ice_deinit_hw(). + */ +void ice_fwlog_deinit(struct ice_hw *hw) +{ + struct ice_pf *pf = hw->back; + int status; + + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return; + + /* make sure FW logging is disabled to not put the FW in a weird state + * for the next driver load + */ + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + status = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n", + status); + + kfree(pf->ice_debugfs_pf_fwlog_modules); + + pf->ice_debugfs_pf_fwlog_modules = NULL; + + status = ice_fwlog_unregister(hw); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n", + status); + + if (hw->fwlog_ring.rings) { + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + } +} + +/** + * ice_fwlog_supported - Cached for whether FW supports FW logging or not + * @hw: pointer to the HW structure + * + * This will always return false if called before ice_init_hw(), so it must be + * called after ice_init_hw(). 
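+ * + * The cached value is set by ice_fwlog_set_supported(), which probes the + * FW once with an ice_aq_fwlog_get() query during init.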
+ */ +bool ice_fwlog_supported(struct ice_hw *hw) +{ + return hw->fwlog_supported; +} + +/** + * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30) + * @hw: pointer to the HW structure + * @entries: entries to configure + * @num_entries: number of @entries + * @options: options from ice_fwlog_cfg->options structure + * @log_resolution: logging resolution + */ +static int +ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, + u16 num_entries, u16 options, u16 log_resolution) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + int status; + int i; + + fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL); + if (!fw_modules) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) { + fw_modules[i].module_identifier = + cpu_to_le16(entries[i].module_id); + fw_modules[i].log_level = entries[i].log_level; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID; + cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution); + cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries); + + if (options & ICE_FWLOG_OPTION_ARQ_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN; + if (options & ICE_FWLOG_OPTION_UART_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN; + + status = ice_aq_send_cmd(hw, &desc, fw_modules, + sizeof(*fw_modules) * num_entries, + NULL); + + kfree(fw_modules); + + return status; +} + +/** + * ice_fwlog_set - Set the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config used to set firmware logging + * + * This function should be called whenever the driver needs to set the firmware + * logging configuration. It can be called on initialization, reset, or during + * runtime. + * + * If the PF wishes to receive FW logging then it must register via + * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called + * for init. 
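+ * + * Minimal caller sketch (the module index and log level chosen here are + * illustrative): + * + *	struct ice_fwlog_cfg cfg = hw->fwlog_cfg; + * + *	cfg.module_entries[0].log_level = ICE_FWLOG_LEVEL_VERBOSE; + *	if (!ice_fwlog_set(hw, &cfg)) + *		hw->fwlog_cfg = cfg;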
+ */ +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_set(hw, cfg->module_entries, + ICE_AQC_FW_LOG_ID_MAX, cfg->options, + cfg->log_resolution); +} + +/** + * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32) + * @hw: pointer to the HW structure + * @cfg: firmware logging configuration to populate + */ +static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + u16 module_id_cnt; + int status; + void *buf; + int i; + + memset(cfg, 0, sizeof(*cfg)); + + buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query); + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY; + + status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL); + if (status) { + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n"); + goto status_out; + } + + module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt); + if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n"); + } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n", + ICE_AQC_FW_LOG_ID_MAX); + module_id_cnt = ICE_AQC_FW_LOG_ID_MAX; + } + + cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution); + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN) + cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN) + cfg->options |= ICE_FWLOG_OPTION_UART_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED) + cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf; + + for (i = 0; i < module_id_cnt; i++) { + struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i]; + + cfg->module_entries[i].module_id = + le16_to_cpu(fw_module->module_identifier); + cfg->module_entries[i].log_level = fw_module->log_level; + } + +status_out: + kfree(buf); + return status; +} + +/** + * ice_fwlog_get - Get the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config to populate based on current firmware logging settings + */ +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_get(hw, cfg); +} + +/** + * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) + * @hw: pointer to the HW structure + * @reg: true to register and false to unregister + */ +static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register); + + if (reg) + desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_fwlog_register - Register the PF for firmware logging + * @hw: pointer to the HW structure + * + * After this call the PF will start to receive firmware logging based on the + * configuration set in ice_fwlog_set. 
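+ * + * Typical bring-up order (sketch, error handling elided): + * + *	hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA; + *	ice_fwlog_set(hw, &hw->fwlog_cfg); + *	ice_fwlog_register(hw);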
+ */ +int ice_fwlog_register(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, true); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_unregister - Unregister the PF from firmware logging + * @hw: pointer to the HW structure + */ +int ice_fwlog_unregister(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, false); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_set_supported - Set if FW logging is supported by FW + * @hw: pointer to the HW struct + * + * If FW returns success to the ice_aq_fwlog_get call then it supports FW + * logging, else it doesn't. Set the fwlog_supported flag accordingly. + * + * This function is only meant to be called during driver init to determine if + * the FW supports FW logging. + */ +void ice_fwlog_set_supported(struct ice_hw *hw) +{ + struct ice_fwlog_cfg *cfg; + int status; + + hw->fwlog_supported = false; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return; + + /* don't call ice_fwlog_get() because that would check to see if FW + * logging is supported which is what the driver is determining now + */ + status = ice_aq_fwlog_get(hw, cfg); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n", + status); + else + hw->fwlog_supported = true; + + kfree(cfg); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h new file mode 100644 index 000000000000..287e71fa4b86 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2022, Intel Corporation. */ + +#ifndef _ICE_FWLOG_H_ +#define _ICE_FWLOG_H_ +#include "ice_adminq_cmd.h" + +struct ice_hw; + +/* Only a single log level should be set and all log levels under the set value + * are enabled, e.g. 
if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all + * other log levels are included (except ICE_FWLOG_LEVEL_NONE) + */ +enum ice_fwlog_level { + ICE_FWLOG_LEVEL_NONE = 0, + ICE_FWLOG_LEVEL_ERROR = 1, + ICE_FWLOG_LEVEL_WARNING = 2, + ICE_FWLOG_LEVEL_NORMAL = 3, + ICE_FWLOG_LEVEL_VERBOSE = 4, + ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */ +}; + +struct ice_fwlog_module_entry { + /* module ID for the corresponding firmware logging event */ + u16 module_id; + /* verbosity level for the module_id */ + u8 log_level; +}; + +struct ice_fwlog_cfg { + /* list of modules for configuring log level */ + struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX]; + /* options used to configure firmware logging */ + u16 options; +#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0) +#define ICE_FWLOG_OPTION_UART_ENA BIT(1) + /* set before calling ice_fwlog_init() so the PF registers for firmware + * logging on initialization + */ +#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2) + /* set in the ice_fwlog_get() response if the PF is registered for FW + * logging events over ARQ + */ +#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3) + + /* minimum number of log events sent per Admin Receive Queue event */ + u16 log_resolution; +}; + +struct ice_fwlog_data { + u16 data_size; + u8 *data; +}; + +struct ice_fwlog_ring { + struct ice_fwlog_data *rings; + u16 index; + u16 size; + u16 head; + u16 tail; +}; + +#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3 +#define ICE_FWLOG_RING_SIZE_DFLT 256 +#define ICE_FWLOG_RING_SIZE_MAX 512 + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings); +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings); +void ice_fwlog_ring_increment(u16 *item, u16 size); +void ice_fwlog_set_supported(struct ice_hw *hw); +bool ice_fwlog_supported(struct ice_hw *hw); +int ice_fwlog_init(struct ice_hw *hw); +void ice_fwlog_deinit(struct ice_hw *hw); +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_register(struct ice_hw *hw); +int ice_fwlog_unregister(struct ice_hw *hw); +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index); +#endif /* _ICE_FWLOG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 86936b758ade..e126dbbd7b2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -404,6 +404,10 @@ #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_HSYMM_REG_SIZE 4 +#define GLQF_HSYMM_REG_PER_PROF 6 +#define GLQF_HSYMM_ENABLE_BIT BIT(7) #define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) #define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c new file mode 100644 index 000000000000..e4c2c1bff6c0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_hwmon.h" +#include "ice_adminq_cmd.h" + +#include <linux/hwmon.h> + +#define TEMP_FROM_REG(reg) ((reg) * 1000) + +static const struct hwmon_channel_info *ice_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_EMERGENCY), + NULL +}; + +static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct ice_aqc_get_sensor_reading_resp resp; + struct ice_pf *pf = dev_get_drvdata(dev); + int ret; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + ret = ice_aq_get_sensor_reading(&pf->hw, &resp); + if (ret) { + dev_warn_ratelimited(dev, + "%s HW read failure (%d)\n", + __func__, + ret); + return ret; + } + + switch (attr) { + case hwmon_temp_input: + *val = TEMP_FROM_REG(resp.data.s0f0.temp); + break; + case hwmon_temp_max: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold); + break; + case hwmon_temp_crit: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold); + break; + case hwmon_temp_emergency: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold); + break; + default: + dev_dbg(dev, "%s unsupported attribute (%d)\n", + __func__, attr); + return -EOPNOTSUPP; + } + + return 0; +} + +static umode_t ice_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_crit: + case hwmon_temp_max: + case hwmon_temp_emergency: + return 0444; + } + + return 0; +} + +static const struct hwmon_ops ice_hwmon_ops = { + .is_visible = ice_hwmon_is_visible, + .read = ice_hwmon_read +}; + +static const struct hwmon_chip_info ice_chip_info = { + .ops = &ice_hwmon_ops, + .info = ice_hwmon_info +}; + +static bool ice_is_internal_reading_supported(struct ice_pf *pf) +{ + /* Only the first PF will report temperature for a chip. + * Note that internal temp reading is not supported + * for older FW (< v4.30). + */ + if (pf->hw.pf_id) + return false; + + unsigned long sensors = pf->hw.dev_caps.supported_sensors; + + return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); +}; + +void ice_hwmon_init(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct device *hdev; + + if (!ice_is_internal_reading_supported(pf)) + return; + + hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info, + NULL); + if (IS_ERR(hdev)) { + dev_warn(dev, + "hwmon_device_register_with_info returns error (%ld)", + PTR_ERR(hdev)); + return; + } + pf->hwmon_dev = hdev; +} + +void ice_hwmon_exit(struct ice_pf *pf) +{ + if (!pf->hwmon_dev) + return; + hwmon_device_unregister(pf->hwmon_dev); +} diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h new file mode 100644 index 000000000000..d66d40354f5a --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023, Intel Corporation. 
*/ + +#ifndef _ICE_HWMON_H_ +#define _ICE_HWMON_H_ + +#ifdef CONFIG_ICE_HWMON +void ice_hwmon_init(struct ice_pf *pf); +void ice_hwmon_exit(struct ice_pf *pf); +#else /* CONFIG_ICE_HWMON */ +static inline void ice_hwmon_init(struct ice_pf *pf) { } +static inline void ice_hwmon_exit(struct ice_pf *pf) { } +#endif /* CONFIG_ICE_HWMON */ + +#endif /* _ICE_HWMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 89f986a75cc8..d384ddfcb83e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -673,6 +673,212 @@ struct ice_tlan_ctx { * Use the enum ice_rx_l2_ptype to decode the packet type * ENDIF */ +#define ICE_PTYPES \ + /* L2 Packet types */ \ + ICE_PTT_UNUSED_ENTRY(0), \ + ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ + ICE_PTT_UNUSED_ENTRY(2), \ + ICE_PTT_UNUSED_ENTRY(3), \ + ICE_PTT_UNUSED_ENTRY(4), \ + ICE_PTT_UNUSED_ENTRY(5), \ + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(8), \ + ICE_PTT_UNUSED_ENTRY(9), \ + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(12), \ + ICE_PTT_UNUSED_ENTRY(13), \ + ICE_PTT_UNUSED_ENTRY(14), \ + ICE_PTT_UNUSED_ENTRY(15), \ + ICE_PTT_UNUSED_ENTRY(16), \ + ICE_PTT_UNUSED_ENTRY(17), \ + ICE_PTT_UNUSED_ENTRY(18), \ + ICE_PTT_UNUSED_ENTRY(19), \ + ICE_PTT_UNUSED_ENTRY(20), \ + ICE_PTT_UNUSED_ENTRY(21), \ + \ + /* Non Tunneled IPv4 */ \ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(25), \ + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv4 */ \ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(32), \ + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv6 */ \ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(39), \ + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT */ \ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> IPv4 */ \ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(47), \ + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> IPv6 */ \ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(52, IP, 
IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(54), \ + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC */ \ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(62), \ + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(69), \ + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(77), \ + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(84), \ + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \ + \ + /* Non Tunneled IPv6 */ \ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(91), \ + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv4 */ \ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(98), \ + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv6 */ \ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, 
NONE, PAY3), \ + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(105), \ + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT */ \ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> IPv4 */ \ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(113), \ + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> IPv6 */ \ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(120), \ + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC */ \ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(128), \ + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(135), \ + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(143), \ + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(150), \ + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + 
ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + +#define ICE_NUM_DEFINED_PTYPES 154 /* macro to make the table lines short, use explicit indexing with [PTYPE] */ #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ @@ -695,212 +901,10 @@ struct ice_tlan_ctx { /* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - /* L2 Packet types */ - ICE_PTT_UNUSED_ENTRY(0), - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - ICE_PTT_UNUSED_ENTRY(2), - ICE_PTT_UNUSED_ENTRY(3), - ICE_PTT_UNUSED_ENTRY(4), - ICE_PTT_UNUSED_ENTRY(5), - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(8), - ICE_PTT_UNUSED_ENTRY(9), - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(12), - ICE_PTT_UNUSED_ENTRY(13), - ICE_PTT_UNUSED_ENTRY(14), - ICE_PTT_UNUSED_ENTRY(15), - ICE_PTT_UNUSED_ENTRY(16), - ICE_PTT_UNUSED_ENTRY(17), - ICE_PTT_UNUSED_ENTRY(18), - ICE_PTT_UNUSED_ENTRY(19), - ICE_PTT_UNUSED_ENTRY(20), - ICE_PTT_UNUSED_ENTRY(21), - - /* Non Tunneled IPv4 */ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(25), - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(32), - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(39), - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(47), - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(54), - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> 
GRE/NAT --> MAC */ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(62), - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(69), - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(77), - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(84), - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(91), - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(98), - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(105), - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - 
ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(113), - ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(120), - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(128), - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(135), - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(143), - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(150), - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + ICE_PTYPES /* unused entries */ - [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } + [ICE_NUM_DEFINED_PTYPES ... 
1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 1bad6e17f9be..5af45932f460 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_txq)); break; case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of VFs. + /* The number of queues for ctrl VSI is equal to number of PRs * Each ring is associated to the corresponding VF_PR netdev. + * Tx and Rx rings are always equal */ - vsi->alloc_txq = ice_get_num_vfs(pf); - vsi->alloc_rxq = vsi->alloc_txq; + if (vsi->req_txq && vsi->req_rxq) { + vsi->alloc_txq = vsi->req_txq; + vsi->alloc_rxq = vsi->req_rxq; + } else { + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + } + vsi->num_q_vectors = 1; break; case ICE_VSI_VF: @@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_pf *pf = q_vector->vsi->back; - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + unsigned long id; if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) - napi_schedule(&vf->repr->q_vector->napi); - rcu_read_unlock(); + xa_for_each(&pf->eswitch.reprs, id, repr) + napi_schedule(&repr->q_vector->napi); return IRQ_HANDLED; } @@ -1186,12 +1191,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; case ICE_VSI_VF: /* VF VSI will get a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; default: dev_dbg(dev, "Unsupported VSI type %s\n", @@ -1199,9 +1202,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) return; } - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + vsi->rss_hfunc = hash_type; + + ctxt->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); } static void @@ -1600,12 +1606,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) return; } - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); if (status) dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", vsi->vsi_num, status); } +static const struct ice_rss_hash_cfg default_rss_cfgs[] = { + /* configure RSS for IPv4 with input set IP src/dst */ + {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for IPv6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false}, +
/* configure RSS for sctp4 with input set IP src/dst - only support + * RSS on SCTPv4 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp6 with input set IPv6 src/dst - only support + * RSS on SCTPv6 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */ + {ICE_FLOW_SEG_HDR_ESP, + ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false}, +}; + /** * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows * @vsi: VSI to be configured @@ -1619,11 +1657,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) */ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) { - u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; + u16 vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct device *dev; int status; + u32 i; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1631,67 +1670,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) vsi_num); return; } - /* configure RSS for IPv4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for IPv6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", - vsi_num, status); + for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { + const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i]; - /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - 
dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, - ICE_FLOW_SEG_HDR_ESP); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", - vsi_num, status); + status = ice_add_rss_cfg(hw, vsi, cfg); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n", + cfg->addl_hdrs, cfg->hash_flds, + cfg->hdr_type, cfg->symm); + } } /** @@ -2450,6 +2437,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi, true); + vsi->stat_offsets_loaded = false; if (ice_is_xdp_ena_vsi(vsi)) { @@ -2926,6 +2917,71 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** + * ice_queue_set_napi - Set the napi instance for the queue + * @dev: device to which NAPI and queue belong + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * @locked: is the rtnl_lock already held + * + * Set the napi instance for the queue + */ +static void +ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) +{ + if (!locked) + rtnl_lock(); + netif_queue_set_napi(dev, queue_index, type, napi); + if (!locked) + rtnl_unlock(); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * @locked: is the rtnl_lock already held + * + * Associate the q_vector napi with all the queue[s] on the vector + */ +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_vsi_set_napi_queues + * @vsi: VSI pointer + * @locked: is the rtnl_lock already held + * + * Associate queue[s] with napi for all vectors + */ +void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) +{ + int i; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, i) + ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked); +} + +/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -3070,27 +3126,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, } /** - * ice_vsi_realloc_stat_arrays - Frees unused stat structures + * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones * @vsi: VSI pointer - * @prev_txq: Number of Tx rings before ring reallocation - * @prev_rxq: Number of Rx rings before ring reallocation */ -static void -ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) +static int +ice_vsi_realloc_stat_arrays(struct ice_vsi 
*vsi) { + u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; + u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; struct ice_vsi_stats *vsi_stat; struct ice_pf *pf = vsi->back; + u16 prev_txq = vsi->alloc_txq; + u16 prev_rxq = vsi->alloc_rxq; int i; - if (!prev_txq || !prev_rxq) - return; - if (vsi->type == ICE_VSI_CHNL) - return; - vsi_stat = pf->vsi_stats[vsi->idx]; - if (vsi->num_txq < prev_txq) { - for (i = vsi->num_txq; i < prev_txq; i++) { + if (req_txq < prev_txq) { + for (i = req_txq; i < prev_txq; i++) { if (vsi_stat->tx_ring_stats[i]) { kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); @@ -3098,14 +3153,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) } } - if (vsi->num_rxq < prev_rxq) { - for (i = vsi->num_rxq; i < prev_rxq; i++) { + tx_ring_stats = vsi_stat->tx_ring_stats; + vsi_stat->tx_ring_stats = + krealloc_array(vsi_stat->tx_ring_stats, req_txq, + sizeof(*vsi_stat->tx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->tx_ring_stats) { + vsi_stat->tx_ring_stats = tx_ring_stats; + return -ENOMEM; + } + + if (req_rxq < prev_rxq) { + for (i = req_rxq; i < prev_rxq; i++) { if (vsi_stat->rx_ring_stats[i]) { kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); } } } + + rx_ring_stats = vsi_stat->rx_ring_stats; + vsi_stat->rx_ring_stats = + krealloc_array(vsi_stat->rx_ring_stats, req_rxq, + sizeof(*vsi_stat->rx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->rx_ring_stats) { + vsi_stat->rx_ring_stats = rx_ring_stats; + return -ENOMEM; + } + + return 0; } /** @@ -3122,9 +3199,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; - int ret, prev_txq, prev_rxq; int prev_num_q_vectors = 0; struct ice_pf *pf; + int ret; if (!vsi) return -EINVAL; @@ -3143,8 +3220,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); - prev_txq = vsi->num_txq; - prev_rxq = vsi->num_rxq; + ret = ice_vsi_realloc_stat_arrays(vsi); + if (ret) + goto err_vsi_cfg; ice_vsi_decfg(vsi); ret = ice_vsi_cfg_def(vsi, &params); @@ -3162,8 +3240,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) return ice_schedule_reset(pf, ICE_RESET_PFR); } - ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); - ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); kfree(coalesce); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index f24f5d1e6f9c..71bd27244941 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -91,6 +91,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); + +void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked); + int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fb9c93f37e84..b97d116650be 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -14,6 +14,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "ice_devlink.h" +#include
"ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the * ice driver. @@ -1252,6 +1253,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) } /** + * ice_get_fwlog_data - copy the FW log data from ARQ event + * @pf: PF that the FW log event is associated with + * @event: event structure containing FW log data + */ +static void +ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + struct ice_fwlog_data *fwlog; + struct ice_hw *hw = &pf->hw; + + fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail]; + + memset(fwlog->data, 0, PAGE_SIZE); + fwlog->data_size = le16_to_cpu(event->desc.datalen); + + memcpy(fwlog->data, event->msg_buf, fwlog->data_size); + ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size); + + if (ice_fwlog_ring_full(&hw->fwlog_ring)) { + /* the rings are full so bump the head to create room */ + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } +} + +/** * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware * @pf: pointer to the PF private structure * @task: intermediate helper storage and identifier for waiting @@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vc_process_vf_msg(pf, &event, &data); break; - case ice_aqc_opc_fw_logging: - ice_output_fw_log(hw, &event.desc, event.msg_buf); + case ice_aqc_opc_fw_logs_event: + ice_get_fwlog_data(pf, &event); break; case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); @@ -3150,7 +3177,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) + if (ice_ptp_pf_handles_tx_interrupt(pf)) set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); } @@ -3375,9 +3402,11 @@ static void ice_napi_add(struct ice_vsi *vsi) if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, v_idx) + ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); + ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + } } /** @@ -3397,6 +3426,7 @@ static void ice_set_ops(struct ice_vsi *vsi) netdev->netdev_ops = &ice_netdev_ops; netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; + netdev->xdp_metadata_ops = &ice_xdp_md_ops; ice_set_ethtool_ops(netdev); if (vsi->type != ICE_VSI_PF) @@ -4361,6 +4391,19 @@ static void ice_print_wake_reason(struct ice_pf *pf) } /** + * ice_pf_fwlog_update_module - update 1 module + * @pf: pointer to the PF struct + * @log_level: log_level to use for the @module + * @module: module to update + */ +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) +{ + struct ice_hw *hw = &pf->hw; + + hw->fwlog_cfg.module_entries[module].log_level = log_level; +} + +/** * ice_register_netdev - register netdev * @vsi: pointer to the VSI struct */ @@ -4685,6 +4728,8 @@ static void ice_init_features(struct ice_pf *pf) if (ice_init_lag(pf)) dev_warn(dev, "Failed to init link aggregation support\n"); + + ice_hwmon_init(pf); } static void ice_deinit_features(struct ice_pf *pf) @@ -4702,6 +4747,8 @@ static void ice_deinit_features(struct ice_pf *pf) ice_ptp_release(pf); if (test_bit(ICE_FLAG_DPLL, pf->flags)) ice_dpll_deinit(pf); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + xa_destroy(&pf->eswitch.reprs); } static void 
ice_init_wakeup(struct ice_pf *pf) @@ -5203,11 +5250,15 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + ice_debugfs_exit(); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } + ice_hwmon_exit(pf); + ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); set_bit(ICE_DOWN, pf->state); @@ -5672,6 +5723,8 @@ static int __init ice_module_init(void) goto err_dest_wq; } + ice_debugfs_init(); + status = pci_register_driver(&ice_driver); if (status) { pr_err("failed to register PCI driver, err %d\n", status); @@ -5682,6 +5735,7 @@ static int __init ice_module_init(void) err_dest_lag_wq: destroy_workqueue(ice_lag_wq); + ice_debugfs_exit(); err_dest_wq: destroy_workqueue(ice_wq); return status; @@ -6041,6 +6095,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features) } /** + * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto + * @vsi: PF's VSI + * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order + * + * Store current stripped VLAN proto in ring packet context, + * so it can be accessed more efficiently by packet processing code. + */ +static void +ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) +{ + u16 i; + + ice_for_each_alloc_rxq(vsi, i) + vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; +} + +/** * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI * @vsi: PF's VSI * @features: features used to determine VLAN offload settings @@ -6082,6 +6153,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) if (strip_err || insert_err) return -EIO; + ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? + htons(vlan_ethertype) : 0); + return 0; } @@ -7401,9 +7475,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + err = ice_eswitch_rebuild(pf); if (err) { - dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + dev_err(dev, "Switchdev rebuild failed: %d\n", err); goto err_vsi_rebuild; } @@ -7704,6 +7778,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) } /** + * ice_set_rss_hfunc - Set RSS HASH function + * @vsi: Pointer to VSI structure + * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) + * + * Returns 0 on success, negative on failure + */ +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctx; + bool symm; + int err; + + if (hfunc == vsi->rss_hfunc) + return 0; + + if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && + hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + return -EOPNOTSUPP; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + ctx->info.q_opt_rss = vsi->info.q_opt_rss; + ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + ctx->info.q_opt_rss |= + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_rss; + + err = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", + vsi->vsi_num, err); + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + vsi->rss_hfunc = hfunc; + netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", + hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? 
+ "Symmetric " : ""); + } + kfree(ctx); + if (err) + return err; + + /* Fix the symmetry setting for all existing RSS configurations */ + symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + return ice_set_rss_cfg_symm(hw, vsi, symm); +} + +/** * ice_bridge_getlink - Get the hardware bridge mode * @skb: skb buff * @pid: process ID @@ -8138,13 +8265,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { enum ice_flow_priority prio; - u64 prof_id; /* add this VSI to FDir profile for this flow */ prio = ICE_FLOW_PRIO_NORMAL; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + status = ice_flow_add_entry(hw, ICE_BLK_FD, + prof->prof_id[tun], prof->vsi_h[0], vsi->idx, prio, prof->fdir_seg[tun], &entry_h); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 71f405f8a6fe..239cf8a2ee80 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -7,7 +7,7 @@ #define E810_OUT_PROP_DELAY_NS 1 -#define UNKNOWN_INCVAL_E822 0x100000000ULL +#define UNKNOWN_INCVAL_E82X 0x100000000ULL static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ @@ -705,7 +705,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) /* Read the Tx ready status first */ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); - if (err || tstamp_ready) + if (err) + break; + else if (tstamp_ready) return ICE_TX_TSTAMP_WORK_PENDING; } @@ -875,7 +877,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) } /** - * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize * @port: the port this structure tracks @@ -886,11 +888,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) * registers into chunks based on the port number. 
*/ static int -ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->block = port / ICE_PORTS_PER_QUAD; - tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; - tx->len = INDEX_PER_PORT_E822; + tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; + tx->len = INDEX_PER_PORT_E82X; tx->verify_cached = 0; return ice_ptp_alloc_tx_tracker(tx); @@ -1093,10 +1095,10 @@ static u64 ice_base_incval(struct ice_pf *pf) if (ice_is_e810(hw)) incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); + else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) + incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); else - incval = UNKNOWN_INCVAL_E822; + incval = UNKNOWN_INCVAL_E82X; dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1125,10 +1127,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) /* need to read FIFO state */ if (offs == 0 || offs == 1) - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, &val); else - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, &val); if (err) { @@ -1156,7 +1158,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) dev_dbg(ice_pf_to_dev(pf), "Port %d Tx FIFO still not empty; resetting quad %d\n", port->port_num, quad); - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); port->tx_fifo_busy_cnt = FIFO_OK; return 0; } @@ -1201,8 +1203,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work) tx_err = ice_ptp_check_tx_fifo(port); if (!tx_err) - tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); - rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); + tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); + rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); if (tx_err || rx_err) { /* Tx and/or Rx offset not yet configured, try again later */ kthread_queue_delayed_work(pf->ptp.kworker, @@ -1231,7 +1233,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e822(hw, port, true); + err = ice_stop_phy_timer_e82x(hw, port, true); if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1274,7 +1276,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) ptp_port->tx_fifo_busy_cnt = 0; /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e822(hw, port); + err = ice_start_phy_timer_e82x(hw, port); if (err) goto out_unlock; @@ -1323,7 +1325,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; - case ICE_PHY_E822: + case ICE_PHY_E82X: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1349,7 +1351,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) break; @@ -1363,7 +1365,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; } - err = 
ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) break; @@ -1601,7 +1603,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; else - start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); + start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); /* 2. Write TARGET time */ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); @@ -1840,7 +1842,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E822) + if (hw->phy_model == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2127,30 +2129,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) } /** - * ice_ptp_rx_hwtstamp - Check for an Rx timestamp - * @rx_ring: Ring to get the VSI info + * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns * @rx_desc: Receive descriptor - * @skb: Particular skb to send timestamp with + * @pkt_ctx: Packet context to get the cached time * * The driver receives a notification in the receive descriptor with timestamp. - * The timestamp is in ns, so we must convert the result first. */ -void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) +u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx) { - struct skb_shared_hwtstamps *hwtstamps; u64 ts_ns, cached_time; u32 ts_high; if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) - return; + return 0; - cached_time = READ_ONCE(rx_ring->cached_phctime); + cached_time = READ_ONCE(pkt_ctx->cached_phctime); /* Do not report a timestamp if we don't have a cached PHC time */ if (!cached_time) - return; + return 0; /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached * PHC value, rather than accessing the PF. This also allows us to @@ -2161,9 +2159,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); - hwtstamps = skb_hwtstamps(skb); - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ts_ns); + return ts_ns; } /** @@ -2440,6 +2436,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } } +/** + * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt + * @pf: Board private structure + * + * The device PHY issues Tx timestamp interrupts to the driver for processing + * timestamp data from the PHY. It will not interrupt again until all + * current timestamp data is read. In rare circumstances, it is possible that + * the driver fails to read all outstanding data. + * + * To avoid getting permanently stuck, periodically check if the PHY has + * outstanding timestamp data. If so, trigger an interrupt from software to + * process this data.
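/* Illustrative aside, not part of the patch: ice_ptp_get_rx_hwts() above
 * returns nanoseconds by extending the 32-bit descriptor timestamp against
 * the cached 64-bit PHC time. A runnable sketch of the usual nearest-wrap
 * extension; the driver's own ice_ptp_extend_32b_ts() may differ in detail.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_lo;

	/* If the forward distance is implausibly large, assume the 32-bit
	 * hardware value is slightly in the past instead.
	 */
	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (uint32_t)(phc_lo - in_tstamp);
	return cached_phc_time + delta;
}

int main(void)
{
	/* a timestamp captured just after a 32-bit rollover of the PHC */
	printf("%llu\n", (unsigned long long)
	       extend_32b_ts(0x1FFFFFFFEULL, 0x00000005));
	return 0;
}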
+ */ +static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + bool trigger_oicr = false; + unsigned int i; + + if (ice_is_e810(hw)) + return; + + if (!ice_pf_src_tmr_owned(pf)) + return; + + for (i = 0; i < ICE_MAX_QUAD; i++) { + u64 tstamp_ready; + int err; + + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (!err && tstamp_ready) { + trigger_oicr = true; + break; + } + } + + if (trigger_oicr) { + /* Trigger a software interrupt, to ensure this data + * gets processed. + */ + dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n"); + + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } +} + static void ice_ptp_periodic_work(struct kthread_work *work) { struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); @@ -2451,6 +2495,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work) err = ice_ptp_update_cached_phctime(pf); + ice_ptp_maybe_trigger_tx_interrupt(pf); + /* Run twice a second or reschedule if phc update failed */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, msecs_to_jiffies(err ? 10 : 500)); @@ -2468,12 +2514,10 @@ void ice_ptp_reset(struct ice_pf *pf) int err, itr = 1; u64 time_diff; - if (test_bit(ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state) || + !ice_pf_src_tmr_owned(pf)) goto pfr; - if (!ice_pf_src_tmr_owned(pf)) - goto reset_ts; - err = ice_ptp_init_phc(hw); if (err) goto err; @@ -2517,10 +2561,6 @@ void ice_ptp_reset(struct ice_pf *pf) goto err; } -reset_ts: - /* Restart the PHY timestamping block */ - ice_ptp_reset_phy_timestamping(pf); - pfr: /* Init Tx structures */ if (ice_is_e810(&pf->hw)) { @@ -2528,7 +2568,7 @@ pfr: } else { kthread_init_delayed_work(&ptp->port.ov_work, ice_ptp_wait_for_offsets); - err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, + err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, ptp->port.port_num); } if (err) @@ -2536,6 +2576,11 @@ pfr: set_bit(ICE_FLAG_PTP, pf->flags); + /* Restart the PHY timestamping block */ + if (!test_bit(ICE_PFR_REQ, pf->state) && + ice_pf_src_tmr_owned(pf)) + ice_ptp_restart_all_phy(pf); + /* Start periodic work going */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); @@ -2896,11 +2941,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E822: + case ICE_PHY_E82X: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e822(pf, &ptp_port->tx, + return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: return -ENODEV; @@ -2987,7 +3032,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { switch (pf->hw.phy_model) { - case ICE_PHY_E822: + case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. 
*/ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 06a330867fc9..032653a7a133 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -147,7 +147,7 @@ struct ice_ptp_tx { /* Quad and port information for initializing timestamp blocks */ #define INDEX_PER_QUAD 64 -#define INDEX_PER_PORT_E822 16 +#define INDEX_PER_PORT_E82X 16 #define INDEX_PER_PORT_E810 64 /** @@ -298,9 +298,8 @@ void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); -void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); +u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx); void ice_ptp_reset(struct ice_pf *pf); void ice_ptp_prepare_for_reset(struct ice_pf *pf); void ice_ptp_init(struct ice_pf *pf); @@ -329,9 +328,14 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf) { return true; } -static inline void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } + +static inline u64 +ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx) +{ + return 0; +} + static inline void ice_ptp_reset(struct ice_pf *pf) { } static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { } static inline void ice_ptp_init(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 4109aa3b2fcd..2c4dab0c48ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -9,17 +9,17 @@ */ /* Constants defined for the PTP 1588 clock hardware. */ -/* struct ice_time_ref_info_e822 +/* struct ice_time_ref_info_e82x * * E822 hardware can use different sources as the reference for the PTP * hardware clock. Each clock has different characteristics such as a slightly * different frequency, etc. * * This lookup table defines several constants that depend on the current time - * reference. See the struct ice_time_ref_info_e822 for information about the + * reference. See the struct ice_time_ref_info_e82x for information about the * meaning of each constant. */ -const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* pll_freq */ @@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { }, }; -const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* refclk_pre_div */ @@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; -/* struct ice_vernier_info_e822 +/* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the * actual packet transmission or reception during the initialization of the @@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { * used by this link speed, and that the register should be cleared by writing * 0. Other values specify the clock frequency in Hz. 
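/* Illustrative aside, not part of the patch: the vernier table below
 * stores one frequency per clock and link speed, where 0 means "clock
 * unused at this speed, clear the register". A sketch of consuming such
 * a table; the clock names and frequencies here are invented for
 * illustration and do not come from e822_vernier.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vernier_clk {
	const char *name;
	uint32_t freq_hz;	/* 0: clock unused at this link speed */
};

static const struct vernier_clk clks_1g[] = {
	{ "tx_par_clk", 125000000 },	/* example value only */
	{ "rx_pcs_clk", 0 },		/* unused: write 0 to the register */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(clks_1g) / sizeof(clks_1g[0]); i++) {
		if (!clks_1g[i].freq_hz)
			printf("%s: unused, clear register to 0\n",
			       clks_1g[i].name);
		else
			printf("%s: %u Hz\n", clks_1g[i].name,
			       clks_1g[i].freq_hz);
	}
	return 0;
}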
*/ -const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = { +const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* ICE_PTP_LNK_SPD_1G */ { /* tx_par_clk */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index a00b55e14aac..187ce9b54e1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) */ /** - * ice_fill_phy_msg_e822 - Fill message data for a PHY register access + * ice_fill_phy_msg_e82x - Fill message data for a PHY register access * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ static void -ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E822; - phy = port / ICE_PORTS_PER_PHY_E822; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; + phy_port = port % ICE_PORTS_PER_PHY_E82X; + phy = port / ICE_PORTS_PER_PHY_E82X; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) } /** - * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register + * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 64bit register * @@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) * represented as two 32bit registers. If it is, return the appropriate high * register offset to use. */ -static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_PAR_PCS_TX_OFFSET_L: @@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register + * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 40bit value * @@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) * upper 32 bits in the high register. If it is, return the appropriate high * register offset to use. */ -static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_TIMETUS_L: @@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_read_phy_reg_e822 - Read a PHY register + * ice_read_phy_reg_e82x - Read a PHY register * @hw: pointer to the HW struct * @port: PHY port to read from * @offset: PHY register offset to read @@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) * Read a PHY register for the given port over the device sideband queue. 
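/* Illustrative aside, not part of the patch: ice_fill_phy_msg_e82x()
 * above derives the PHY number, the port within that PHY, and the quad
 * type from a global port number. A runnable sketch of that arithmetic,
 * assuming ICE_PORTS_PER_PHY_E82X == 8, ICE_PORTS_PER_QUAD == 4 and
 * ICE_QUADS_PER_PHY_E82X == 2; the real constants live in the driver.
 */
#include <stdio.h>

int main(void)
{
	for (int port = 0; port < 16; port++) {
		int phy_port = port % 8;	/* port within the PHY */
		int phy = port / 8;		/* which PHY */
		int quadtype = (port / 4) % 2;	/* quad 0 or quad 1 block */

		printf("port %2d -> phy %d, phy_port %d, quadtype %d\n",
		       port, phy, phy_port, quadtype);
	}
	return 0;
}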
*/ static int -ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) +ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_rd; err = ice_sbq_rw_reg(hw, &msg); @@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) } /** - * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers + * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) * known to be two parts of a 64bit value. */ static int -ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) { u32 low, high; u16 high_addr; @@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) /* Only operate on registers known to be split into two 32bit * registers. */ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; } - err = ice_read_phy_reg_e822(hw, port, low_addr, &low); + err = ice_read_phy_reg_e82x(hw, port, low_addr, &low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_read_phy_reg_e822(hw, port, high_addr, &high); + err = ice_read_phy_reg_e82x(hw, port, high_addr, &high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", high_addr, err); @@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) } /** - * ice_write_phy_reg_e822 - Write a PHY register + * ice_write_phy_reg_e82x - Write a PHY register * @hw: pointer to the HW struct * @port: PHY port to write to * @offset: PHY register offset to write @@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) * Write a PHY register for the given port over the device sideband queue. */ static int -ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) +ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; @@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) } /** - * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY + * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY * @hw: pointer to the HW struct * @port: port to write to * @low_addr: offset of the low register @@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) * it up into two chunks, the lower 8 bits and the upper 32 bits. 
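/* Illustrative aside, not part of the patch: the 40-bit write below
 * splits a value into an 8-bit low register and a 32-bit high register,
 * as the comment above describes. A runnable round-trip check; the mask
 * and shift are assumptions standing in for P_REG_40B_LOW_M and
 * P_REG_40B_HIGH_S.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x12345678ABULL;		/* any 40-bit quantity */
	uint32_t low = (uint32_t)(val & 0xFF);	/* lower 8 bits */
	uint32_t high = (uint32_t)(val >> 8);	/* upper 32 bits */

	/* reassembling the two halves yields the original value */
	assert((((uint64_t)high << 8) | low) == val);
	return 0;
}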
*/ static int -ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into a lower 8 bit * register and an upper 32 bit register. */ - if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = (u32)(val & P_REG_40B_LOW_M); high = (u32)(val >> P_REG_40B_HIGH_S); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers + * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * a 64bit value. */ static int -ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into two 32bit * registers. */ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = lower_32_bits(val); high = upper_32_bits(val); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_fill_quad_msg_e822 - Fill message data for quad register access + * ice_fill_quad_msg_e82x - Fill message data for quad register access * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset @@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * multiple PHYs. 
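/* Illustrative aside, not part of the patch: quad registers live at one
 * of two base addresses depending on which quad within the PHY is
 * addressed, as in ice_fill_quad_msg_e82x() below. Sketch with assumed
 * base values and ICE_QUADS_PER_PHY_E82X == 2; the real Q_0_BASE and
 * Q_1_BASE differ.
 */
#include <stdint.h>
#include <stdio.h>

#define Q0_BASE	0x6000	/* assumed, stands in for Q_0_BASE */
#define Q1_BASE	0x7000	/* assumed, stands in for Q_1_BASE */

static uint32_t quad_reg_addr(unsigned int quad, uint16_t offset)
{
	return ((quad % 2) == 0 ? Q0_BASE : Q1_BASE) + offset;
}

int main(void)
{
	for (unsigned int quad = 0; quad < 4; quad++)
		printf("quad %u, offset 0x10 -> addr 0x%x\n",
		       quad, quad_reg_addr(quad, 0x10));
	return 0;
}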
*/ static int -ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; @@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) msg->dest_dev = rmn_0; - if ((quad % ICE_QUADS_PER_PHY_E822) == 0) + if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) } /** - * ice_read_quad_reg_e822 - Read a PHY quad register + * ice_read_quad_reg_e82x - Read a PHY quad register * @hw: pointer to the HW struct * @quad: quad to read from * @offset: quad register offset to read @@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) * shared between multiple PHYs. */ int -ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) +ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) } /** - * ice_write_quad_reg_e822 - Write a PHY quad register + * ice_write_quad_reg_e82x - Write a PHY quad register * @hw: pointer to the HW struct * @quad: quad to write to * @offset: quad register offset to write @@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) * shared between multiple PHYs. */ int -ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) +ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) } /** - * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block + * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to read @@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) * family of devices. 
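/* Illustrative aside, not part of the patch: the readback below fetches
 * the timestamp as a 32-bit low word plus a high word. A sketch of
 * reassembling a 40-bit timestamp, assuming the high register carries
 * bits 39:32 and that the low 8 bits hold sub-ns data which callers
 * discard before extending the remaining 32 bits to 64.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x345678AB, hi = 0x12;

	uint64_t raw40 = ((uint64_t)hi << 32) | lo;	/* 40-bit raw value */
	uint32_t ns32 = (uint32_t)(raw40 >> 8);		/* drop sub-ns byte */

	printf("raw 0x%010llx -> 32-bit ns 0x%08x\n",
	       (unsigned long long)raw40, ns32);
	return 0;
}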
*/ static int -ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) +ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) { u16 lo_addr, hi_addr; u32 lo, hi; @@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo); + err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", err); return err; } - err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi); + err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", err); @@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) } /** - * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block + * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to reset @@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * * To directly clear the contents of the timestamp block entirely, discarding * all timestamp data at once, software should instead use - * ice_ptp_reset_ts_memory_quad_e822(). + * ice_ptp_reset_ts_memory_quad_e82x(). * * This function should only be called on an idx whose bit is set according to * ice_get_phy_tx_tstamp_ready(). */ static int -ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) +ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx) { u64 unused_tstamp; int err; - err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); + err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", quad, idx, err); @@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) } /** - * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block + * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * * Clear all timestamps from the PHY quad block that is shared between the * internal PHYs on the E822 devices. 
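/* Illustrative aside, not part of the patch: the quad memory reset below
 * simply pulses a control field, writing the mask to assert it and the
 * inverse to release it. A sketch against a fake register file; the
 * register index and bit are stand-ins, not driver values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[16];

static void reg_write(unsigned int reg, uint32_t val)
{
	fake_regs[reg] = val;
	printf("reg[%u] <- 0x%08x\n", reg, val);
}

int main(void)
{
	const unsigned int TS_CTRL = 3;		/* stand-in for Q_REG_TS_CTRL */
	const uint32_t TS_CTRL_M = 1u << 0;	/* stand-in for Q_REG_TS_CTRL_M */

	reg_write(TS_CTRL, TS_CTRL_M);		/* assert: clear timestamp memory */
	reg_write(TS_CTRL, ~TS_CTRL_M);		/* deassert */
	return 0;
}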
*/ -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad) { - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); } /** - * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks + * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks * @hw: pointer to the HW struct */ -static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) +static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; for (quad = 0; quad < ICE_MAX_QUAD; quad++) - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } /** - * ice_read_cgu_reg_e822 - Read a CGU register + * ice_read_cgu_reg_e82x - Read a CGU register * @hw: pointer to the HW struct * @addr: Register address to read * @val: storage for register value read @@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) * applicable to E822 devices. */ static int -ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) +ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) } /** - * ice_write_cgu_reg_e822 - Write a CGU register + * ice_write_cgu_reg_e82x - Write a CGU register * @hw: pointer to the HW struct * @addr: Register address to write * @val: value to write into the register @@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) * applicable to E822 devices. */ static int -ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val) +ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src) } /** - * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit * @hw: pointer to the HW struct * @clk_freq: Clock frequency to program * @clk_src: Clock source to select (TIME_REF, or TCX0) @@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src) * time reference, enabling the PLL which drives the PTP hardware clock. 
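/* Illustrative aside, not part of the patch: the CGU programming below
 * follows a fixed order. A condensed, runnable sketch of that sequence
 * with stubbed register I/O; the register indices and bit positions here
 * are invented, only the ordering mirrors the hunk.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];				/* fake register map */
static uint32_t rd(unsigned int r) { return regs[r]; }
static void wr(unsigned int r, uint32_t v) { regs[r] = v; }

enum { DW9 = 9, DW19 = 19, DW22 = 22, DW24 = 24, BWM_LF = 40 };
#define PLL_ENABLE	(1u << 24)
#define PLL_LOCKED	(1u << 31)

static int cfg_cgu_pll(uint32_t freq_sel, uint32_t clk_src)
{
	/* 1. disable the PLL before touching dividers */
	wr(DW24, rd(DW24) & ~PLL_ENABLE);
	/* 2. select the TIME_REF frequency */
	wr(DW9, freq_sel);
	/* 3. program feedback and post divisors for that frequency */
	wr(DW19, 0);	/* the real driver takes these from a per-freq table */
	wr(DW22, 0);
	/* 4. select the clock source and re-enable the PLL */
	wr(DW24, clk_src | PLL_ENABLE);
	/* 5. poll the lock indication; fail if it never locks */
	regs[BWM_LF] |= PLL_LOCKED;	/* fake hardware locks instantly */
	return (rd(BWM_LF) & PLL_LOCKED) ? 0 : -1;
}

int main(void)
{
	printf("pll config %s\n", cfg_cgu_pll(0, 0) ? "failed" : "locked");
	return 0;
}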
*/ static int -ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, +ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, enum ice_clk_src clk_src) { union tspll_ro_bwm_lf bwm_lf; @@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, return -EINVAL; } - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, if (dw24.field.ts_pll_enable) { dw24.field.ts_pll_enable = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; } /* Set the frequency */ dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); if (err) return err; /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); if (err) return err; dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; dw19.field.tspll_ndivratio = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); if (err) return err; /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); if (err) return err; dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; dw22.field.time1588clk_sel_div2 = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); if (err) return err; /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; @@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; dw24.field.time_ref_sel = clk_src; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Finally, enable the PLL */ dw24.field.ts_pll_enable = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Wait to verify if the PLL locks */ usleep_range(1000, 5000); - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, } /** - * ice_init_cgu_e822 - Initialize CGU with settings from firmware + * ice_init_cgu_e82x - Initialize CGU with settings from firmware * @hw: pointer to the HW structure * * Initialize the Clock Generation Unit of the E822 device. 
*/ -static int ice_init_cgu_e822(struct ice_hw *hw) +static int ice_init_cgu_e82x(struct ice_hw *hw) { struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; union tspll_cntr_bist_settings cntr_bist; int err; - err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, &cntr_bist.val); if (err) return err; @@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) cntr_bist.field.i_plllock_sel_0 = 0; cntr_bist.field.i_plllock_sel_1 = 0; - err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, cntr_bist.val); if (err) return err; @@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) /* Configure the CGU PLL using the parameters from the function * capabilities. */ - err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref, + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, (enum ice_clk_src)ts_info->clk_src); if (err) return err; @@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_write_phy_reg_e822(hw, port, P_REG_WL, + err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, PTP_VERNIER_WL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", @@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) } /** - * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization + * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization * @hw: pointer to HW struct * * Perform PHC initialization steps specific to E822 devices. */ -static int ice_ptp_init_phc_e822(struct ice_hw *hw) +static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; u32 regval; @@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) wr32(hw, PF_SB_REM_DEV_CTL, regval); /* Initialize the Clock Generation Unit */ - err = ice_init_cgu_e822(hw); + err = ice_init_cgu_e82x(hw); if (err) return err; @@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) } /** - * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time + * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time * @hw: pointer to the HW struct * @time: Time to initialize the PHY port clocks to * @@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) * units of nominal nanoseconds. */ static int -ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) +ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) { u64 phy_time; u8 port; @@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { /* Tx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, phy_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, phy_time); if (err) @@ -1201,7 +1201,7 @@ exit_err: } /** - * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust + * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust * @hw: pointer to HW struct * @port: Port number to be programmed * @time: time in cycles to adjust the port Tx and Rx clocks @@ -1216,7 +1216,7 @@ exit_err: * Negative adjustments are supported using 2s complement arithmetic. 
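In ice_ptp_prep_phy_time_e82x() above, the computation of phy_time is elided. The PHY port timers use a 32.32 fixed-point format whose upper half holds whole nominal nanoseconds, so the 32-bit input presumably just shifts into the integer half:

	/* The input is whole nominal nanoseconds; the lower 32 bits of the
	 * port timer hold the sub-nanosecond fraction, so shift it up.
	 */
	phy_time = (u64)time << 32;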
*/ static int -ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) +ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time) { u32 l_time, u_time; int err; @@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) u_time = upper_32_bits(time); /* Tx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; @@ -1255,7 +1255,7 @@ exit_err: } /** - * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment + * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment * @hw: pointer to HW struct * @adj: adjustment in nanoseconds * @@ -1264,7 +1264,7 @@ exit_err: * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ static int -ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) +ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) { s64 cycles; u8 port; @@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_prep_port_adj_e822(hw, port, cycles); + err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); if (err) return err; } @@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) } /** - * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment + * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment * @hw: pointer to HW struct * @incval: new increment value to prepare * @@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) * issuing an ICE_PTP_INIT_INCVAL command. 
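The conversion from the signed nanosecond adjustment to cycles in ice_ptp_prep_phy_adj_e82x() is elided above. The port timers take the value in the same 32.32 fixed-point format, and left-shifting a negative integer is undefined in C, so the sign is presumably handled explicitly, which also produces the 2's-complement encoding mentioned earlier:

	/* Program the adjustment as whole nanoseconds in 32.32 fixed point;
	 * negate before and after the shift rather than shifting a negative
	 * value directly.
	 */
	if (adj > 0)
		cycles = (s64)adj << 32;
	else
		cycles = -(((s64)-adj) << 32);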
*/ static int -ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) +ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) { int err; u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) goto exit_err; @@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) int err; /* Tx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", err); @@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) (unsigned long long)*tx_ts); /* Rx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", err); @@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) } /** - * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command + * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command * @hw: pointer to HW struct * @port: Port to which cmd has to be sent * @cmd: Command to be sent to the port @@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * Do not use this function directly. If you want to configure exactly one * port, use ice_ptp_one_port_cmd() instead. */ -static int -ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) +static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) { u32 cmd_val, val; u8 tmr_idx; @@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Tx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", err); @@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", err); @@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Rx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", err); @@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, else cmd = ICE_PTP_NOP; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if 
(err) return err; } @@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, } /** - * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command + * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command * @hw: pointer to the HW struct * @cmd: timer command to prepare * @@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, * command. */ static int -ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if (err) return err; } @@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ /** - * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode + * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode * @hw: pointer to HW struct * @port: the port to read from * @link_out: if non-NULL, holds link speed on success @@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) * algorithm. */ static int -ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, +ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd *link_out, enum ice_ptp_fec_mode *fec_out) { @@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, u32 serdes; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes); + err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n"); return err; @@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, } /** - * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp + * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp * @hw: pointer to HW struct * @port: to configure the quad for */ -static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) +static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; int err; u32 val; u8 quad; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n", err); @@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) quad = port / ICE_PORTS_PER_QUAD; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n", err); @@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) else val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M; - err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n", err); @@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822 + * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822 * @hw: pointer to the HW structure * @port: the port to configure * @@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw 
*hw, u8 port) * a divide by 390,625,000. This does lose some precision, but avoids * miscalculation due to arithmetic overflow. */ -static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, uix; int err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second divided by 256 */ @@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 10Gb/40Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n", @@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 25Gb/100Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n", @@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle + * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle * @hw: pointer to the HW struct * @port: port to configure * @@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) * frequency is ~29 bits, so multiplying them together should fit within the * 64 bit arithmetic. 
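A short check of the constant used above: 390,625,000 * 256 == 10^11, and tu_per_sec was already divided by 256, so div_u64(tu_per_sec * LINE_UI, 390625000) computes (cur_freq * clk_incval * LINE_UI) / 10^11 while every intermediate product stays within 64 bits. A standalone sketch with placeholder values (cur_freq, clk_incval, and the LINE_UI_10G_40G value are illustrative assumptions, not hardware numbers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cur_freq = 823437500ULL;	/* placeholder PLL frequency, Hz */
	uint64_t clk_incval = 0x136e44fabULL;	/* placeholder source incval */
	uint64_t line_ui = 640;			/* assumed LINE_UI_10G_40G */

	uint64_t tu_per_sec = (cur_freq * clk_incval) >> 8; /* divided by 256 */
	uint64_t uix = tu_per_sec * line_ui / 390625000ULL;

	assert(390625000ULL * 256ULL == 100000000000ULL);   /* == 10^11 */
	printf("UIX66 value: %llu\n", (unsigned long long)uix);
	return 0;
}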
*/ -static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, phy_tus; enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; int err; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per cycle of the PHC clock */ @@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L, phy_tus); if (err) return err; @@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L, + return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L, phy_tus); } /** - * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port + * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port * @hw: pointer to the HW struct * @link_spd: the Link speed to calculate for * * Calculate the fixed offset due to known static latency data. 
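Every write in ice_phy_cfg_parpcs_e82x() repeats the same elided computation: divide tu_per_sec by the relevant Vernier clock frequency when one is defined for the link speed, otherwise program zero. The first pair, sketched with the field names from struct ice_vernier_info_e82x declared in the header below:

	/* P_REG_PAR_TX_TUS: TUs per PAR Tx clock cycle, if this speed uses one */
	if (e822_vernier[link_spd].tx_par_clk)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].tx_par_clk);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
					 phy_tus);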
*/ static u64 -ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset + * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n", port, err); @@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", port, err); @@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_TX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. @@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_25G_RS || link_spd == ICE_PTP_LNK_SPD_40G || link_spd == ICE_PTP_LNK_SPD_50G) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_TX_OFFSET_L, &val); if (err) @@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) */ if (link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TIME_L, &val); if (err) @@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Tx offset is ready. After this, * timestamps will be enabled. 
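The elided body of ice_calc_fixed_tx_offset_e82x() scales a per-speed fixed delay into TUs. Assuming the delay table stores hundredths of a nanosecond (a tx_fixed_delay field in e822_vernier), the product must be divided by 10^11, which would need to be split to avoid 64-bit overflow:

	tu_per_sec = cur_freq * clk_incval;

	/* Dividing by 1e4 first, then by 1e7, implements the 1e11 scale-down
	 * without letting the intermediate product overflow 64 bits.
	 */
	fixed_offset = div_u64(tu_per_sec, 10000);
	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
	fixed_offset = div_u64(fixed_offset, 10000000);

	return fixed_offset;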
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1); if (err) return err; @@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx + * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx * @hw: pointer to the HW struct * @port: the PHY port to adjust for * @link_spd: the current link speed of the PHY @@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * various delays caused when receiving a packet. */ static int -ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, +ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd link_spd, enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj) { @@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u32 val; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n", err); @@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, pmd_align = (u8)val; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n", @@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n", @@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, } /** - * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port + * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port * @hw: pointer to HW struct * @link_spd: The Link speed to calculate for * * Determine the fixed Rx latency for a given link speed. 
*/ static u64 -ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset + * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n", port, err); @@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", port, err); @@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_RX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. */ - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_RX_OFFSET_L, &val); if (err) @@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_50G || link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TIME_L, &val); if (err) @@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /* In addition, Rx must account for the PMD alignment */ - err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd); + err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd); if (err) return err; @@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Rx offset is ready. After this, * timestamps will be enabled. 
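The Rx path ends by folding the PMD alignment adjustment into total_offset. The sign of that correction presumably depends on the FEC mode, using the ICE_PTP_FEC_MODE_RS_FEC enumerator from the header; a sketch of the elided step:

	/* The PMD alignment adjustment adds delay for RS-FEC but removes it
	 * for the other modes (an assumption based on the vernier scheme
	 * described above).
	 */
	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
		total_offset += pmd;
	else
		total_offset -= pmd;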
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1); if (err) return err; @@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time + * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct * @port: the PHY port to read * @phy_time: on return, the 64bit PHY timer value @@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * and PHC timer values. */ static int -ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, +ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time, u64 *phc_time) { u64 tx_time, rx_time; @@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, } /** - * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer + * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer * @hw: pointer to the HW struct * @port: the PHY port to synchronize * @@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, * to the PHY timer in order to ensure it reads the same value as the * primary PHC timer. */ -static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) +static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port) { u64 phc_time, phy_time, difference; int err; @@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) return -EBUSY; } - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) */ difference = phc_time - phy_time; - err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference); + err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference); if (err) goto err_unlock; @@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) /* Re-capture the timer values to flush the command registers and * verify that the time was properly adjusted. */ - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2452,7 +2452,7 @@ err_unlock: } /** - * ice_stop_phy_timer_e822 - Stop the PHY clock timer + * ice_stop_phy_timer_e82x - Stop the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to stop * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS @@ -2462,36 +2462,36 @@ err_unlock: * initialized or when link speed changes. 
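ice_read_phy_and_phc_time_e82x() above elides the capture mechanics. The simultaneous read works by arming both the PHY port and the PHC for a READ_TIME capture and then firing a single timer command, so both clocks latch on the same edge. A sketch, assuming the ICE_PTP_READ_TIME command and the GLTSYN shadow-time registers used elsewhere in the driver:

	u8 tmr_idx = ice_get_ptp_src_clock_index(hw);
	u32 lo, hi;

	/* Arm the PHY port and the source PHC, then trigger the capture */
	err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
	if (err)
		return err;
	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
	ice_ptp_exec_tmr_cmd(hw);

	/* PHC time is latched into the GLTSYN shadow registers */
	lo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
	hi = rd32(hw, GLTSYN_SHTIME_H(tmr_idx));
	*phc_time = (u64)hi << 32 | lo;

	/* PHY time is latched into the port capture registers */
	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);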
*/ int -ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) +ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset) { int err; u32 val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); if (err) return err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val &= ~P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; if (soft_reset) { val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; } @@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) } /** - * ice_start_phy_timer_e822 - Start the PHY clock timer + * ice_start_phy_timer_e82x - Start the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to start * @@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) * * Hardware will take Vernier measurements on Tx or Rx of packets. */ -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port) { u32 lo, hi, val; u64 incval; @@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) tmr_idx = ice_get_ptp_src_clock_index(hw); - err = ice_stop_phy_timer_e822(hw, port, false); + err = ice_stop_phy_timer_e82x(hw, port, false); if (err) return err; - ice_phy_cfg_lane_e822(hw, port); + ice_phy_cfg_lane_e82x(hw, port); - err = ice_phy_cfg_uix_e822(hw, port); + err = ice_phy_cfg_uix_e82x(hw, port); if (err) return err; - err = ice_phy_cfg_parpcs_e822(hw, port); + err = ice_phy_cfg_parpcs_e82x(hw, port); if (err) return err; @@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); incval = (u64)hi << 32 | lo; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) return err; @@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; @@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); val |= P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_LOAD_OFFSET_M; - err = 
ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; ice_ptp_exec_tmr_cmd(hw); - err = ice_sync_phy_timer_e822(hw, port); + err = ice_sync_phy_timer_e82x(hw, port); if (err) return err; @@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) } /** - * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register + * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register * @hw: pointer to the HW struct * @quad: the timestamp quad to read from * @tstamp_ready: contents of the Tx memory status register @@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) * ready to be captured from the PHY timestamp block. */ static int -ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) +ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) { u32 hi, lo; int err; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n", quad, err); return err; } - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n", quad, err); @@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw) if (ice_is_e810(hw)) hw->phy_model = ICE_PHY_E810; else - hw->phy_model = ICE_PHY_E822; + hw->phy_model = ICE_PHY_E82X; } /** @@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) case ICE_PHY_E810: err = ice_ptp_port_cmd_e810(hw, cmd); break; - case ICE_PHY_E822: - err = ice_ptp_port_cmd_e822(hw, cmd); + case ICE_PHY_E82X: + err = ice_ptp_port_cmd_e82x(hw, cmd); break; default: err = -EOPNOTSUPP; @@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; default: err = -EOPNOTSUPP; @@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_incval_e822(hw, incval); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; default: err = -EOPNOTSUPP; @@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_adj_e822(hw, adj); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; default: err = -EOPNOTSUPP; @@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) switch (hw->phy_model) { case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E822: - return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + case ICE_PHY_E82X: + return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) switch (hw->phy_model) { case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case 
ICE_PHY_E822: - return ice_clear_phy_tstamp_e822(hw, block, idx); + case ICE_PHY_E82X: + return ice_clear_phy_tstamp_e82x(hw, block, idx); default: return -EOPNOTSUPP; } @@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) void ice_ptp_reset_ts_memory(struct ice_hw *hw) { switch (hw->phy_model) { - case ICE_PHY_E822: - ice_ptp_reset_ts_memory_e822(hw); + case ICE_PHY_E82X: + ice_ptp_reset_ts_memory_e82x(hw); break; case ICE_PHY_E810: default: @@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E822: - return ice_ptp_init_phc_e822(hw); + case ICE_PHY_E82X: + return ice_ptp_init_phc_e82x(hw); default: return -EOPNOTSUPP; } @@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E822: - return ice_get_phy_tx_tstamp_ready_e822(hw, block, + case ICE_PHY_E82X: + return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); break; default: @@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) case ICE_DEV_ID_E823C_QSFP: case ICE_DEV_ID_E823C_SFP: case ICE_DEV_ID_E823C_SGMII: - *pin_num = ICE_E822_RCLK_PINS_NUM; + *pin_num = ICE_E82X_RCLK_PINS_NUM; ret = 0; if (hw->cgu_part_number == ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index cf76701566c7..e976138e934a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -42,7 +42,7 @@ enum ice_ptp_fec_mode { }; /** - * struct ice_time_ref_info_e822 + * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L * @pps_delay: propagation delay of the PPS output signal @@ -50,14 +50,14 @@ enum ice_ptp_fec_mode { * Characteristic information for the various TIME_REF sources possible in the * E822 devices */ -struct ice_time_ref_info_e822 { +struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; u8 pps_delay; }; /** - * struct ice_vernier_info_e822 + * struct ice_vernier_info_e82x * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS @@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 { * different link speeds, either the deskew marker for multi-lane link speeds * or the Reed Solomon gearbox marker for RS-FEC. */ -struct ice_vernier_info_e822 { +struct ice_vernier_info_e82x { u32 tx_par_clk; u32 rx_par_clk; u32 tx_pcs_clk; @@ -95,7 +95,7 @@ struct ice_vernier_info_e822 { }; /** - * struct ice_cgu_pll_params_e822 + * struct ice_cgu_pll_params_e82x * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -104,7 +104,7 @@ struct ice_vernier_info_e822 { * Clock Generation Unit parameters used to program the PLL based on the * selected TIME_REF frequency. 
*/ -struct ice_cgu_pll_params_e822 { +struct ice_cgu_pll_params_e82x { u32 refclk_pre_div; u32 feedback_div; u32 frac_n_div; @@ -124,7 +124,7 @@ enum ice_phy_rclk_pins { }; #define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1) -#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) +#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) #define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \ (_pin) + ZL_REF1P) @@ -183,16 +183,16 @@ struct ice_cgu_pin_desc { }; extern const struct -ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 /* Table of constants related to possible TIME_REF sources */ -extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; /* Table of constants for Vernier calibration on E822 */ -extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; +extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. @@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); /* E822 family functions */ -int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); -int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad); +int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); +int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val); +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad); /** - * ice_e822_time_ref - Get the current TIME_REF from capabilities + * ice_e82x_time_ref - Get the current TIME_REF from capabilities * @hw: pointer to the HW structure * * Returns the current TIME_REF from the capabilities structure. */ -static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) +static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw) { return hw->func_caps.ts_func_info.time_ref; } /** - * ice_set_e822_time_ref - Set new TIME_REF + * ice_set_e82x_time_ref - Set new TIME_REF * @hw: pointer to the HW structure * @time_ref: new TIME_REF to set * @@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) * change, such as an update to the CGU registers. 
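Taken together with the accessors declared just below, a caller that needs the clock characteristics for the current TIME_REF can chain these helpers; a usage sketch:

	enum ice_time_ref_freq time_ref = ice_e82x_time_ref(hw);

	u64 pll_freq = ice_e82x_pll_freq(time_ref);	   /* ticks per second */
	u64 incval = ice_e82x_nominal_incval(time_ref);	   /* GLTSYN_TIME_L step */
	u64 pps_delay = ice_e82x_pps_delay(time_ref);	   /* PPS propagation, ns */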
*/ static inline void -ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) +ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) { hw->func_caps.ts_func_info.time_ref = time_ref; } -static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pll_freq; } -static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].nominal_incval; } -static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pps_delay; } /* E822 Vernier calibration functions */ -int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port); +int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index c686ac0935eb..5f30fb131f74 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -14,7 +14,7 @@ */ static int ice_repr_get_sw_port_id(struct ice_repr *repr) { - return repr->vf->pf->hw.port_info->lport; + return repr->src_vsi->back->hw.port_info->lport; } /** @@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) return -EOPNOTSUPP; res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->vf->vf_id); + repr->id); if (res <= 0) return -EOPNOTSUPP; return 0; @@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } +static void ice_repr_remove_node(struct devlink_port *devlink_port) +{ + devl_lock(devlink_port->devlink); + devl_rate_leaf_destroy(devlink_port); + devl_unlock(devlink_port->devlink); +} + /** - * ice_repr_add - add representor for VF - * @vf: pointer to VF structure + * ice_repr_rem - remove representor from VF + * @repr: pointer to representor structure */ -static int ice_repr_add(struct ice_vf *vf) +static void ice_repr_rem(struct ice_repr *repr) +{ + kfree(repr->q_vector); + free_netdev(repr->netdev); + kfree(repr); +} + +/** + * ice_repr_rem_vf - remove representor from VF + * @repr: pointer to representor structure + */ +void ice_repr_rem_vf(struct ice_repr *repr) +{ + ice_repr_remove_node(&repr->vf->devlink_port); + unregister_netdev(repr->netdev); + ice_devlink_destroy_vf_port(repr->vf); + ice_virtchnl_set_dflt_ops(repr->vf); + ice_repr_rem(repr); +} + +static void ice_repr_set_tx_topology(struct ice_pf *pf) +{ + struct devlink *devlink; + + /* only export if ADQ and DCB disabled and eswitch enabled*/ + if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) || + !ice_is_switchdev_running(pf)) + return; + + devlink = priv_to_devlink(pf); + ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); +} + +/** + * ice_repr_add - add representor for generic VSI + * @pf: pointer to PF structure + * @src_vsi: pointer 
to VSI structure of device to represent + * @parent_mac: device MAC address + */ +static struct ice_repr * +ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) { struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; - struct ice_vsi *vsi; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - repr = kzalloc(sizeof(*repr), GFP_KERNEL); if (!repr) - return -ENOMEM; + return ERR_PTR(-ENOMEM); repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { @@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc; } - repr->src_vsi = vsi; - repr->vf = vf; - vf->repr = repr; + repr->src_vsi = src_vsi; np = netdev_priv(repr->netdev); np->repr = repr; @@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc_q_vector; } repr->q_vector = q_vector; + repr->q_id = repr->id; + + ether_addr_copy(repr->parent_mac, parent_mac); + + return repr; + +err_alloc_q_vector: + free_netdev(repr->netdev); +err_alloc: + kfree(repr); + return ERR_PTR(err); +} + +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +{ + struct ice_repr *repr; + struct ice_vsi *vsi; + int err; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return ERR_PTR(-ENOENT); err = ice_devlink_create_vf_port(vf); if (err) - goto err_devlink; + return ERR_PTR(err); + + repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_repr_add; + } + + repr->vf = vf; repr->netdev->min_mtu = ETH_MIN_MTU; repr->netdev->max_mtu = ICE_MAX_MTU; @@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf) goto err_netdev; ice_virtchnl_set_repr_ops(vf); + ice_repr_set_tx_topology(vf->pf); - return 0; + return repr; err_netdev: + ice_repr_rem(repr); +err_repr_add: ice_devlink_destroy_vf_port(vf); -err_devlink: - kfree(repr->q_vector); - vf->repr->q_vector = NULL; -err_alloc_q_vector: - free_netdev(repr->netdev); - repr->netdev = NULL; -err_alloc: - kfree(repr); - vf->repr = NULL; - return err; -} - -/** - * ice_repr_rem - remove representor from VF - * @vf: pointer to VF structure - */ -static void ice_repr_rem(struct ice_vf *vf) -{ - if (!vf->repr) - return; - - kfree(vf->repr->q_vector); - vf->repr->q_vector = NULL; - unregister_netdev(vf->repr->netdev); - ice_devlink_destroy_vf_port(vf); - free_netdev(vf->repr->netdev); - vf->repr->netdev = NULL; - kfree(vf->repr); - vf->repr = NULL; - - ice_virtchnl_set_dflt_ops(vf); -} - -/** - * ice_repr_rem_from_all_vfs - remove port representor for all VFs - * @pf: pointer to PF structure - */ -void ice_repr_rem_from_all_vfs(struct ice_pf *pf) -{ - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - ice_repr_rem(vf); - - /* since all port representors are destroyed, there is - * no point in keeping the nodes - */ - devlink = priv_to_devlink(pf); - devl_lock(devlink); - devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); + return ERR_PTR(err); } -/** - * ice_repr_add_for_all_vfs - add port representor for all VFs - * @pf: pointer to PF structure - */ -int ice_repr_add_for_all_vfs(struct ice_pf *pf) +struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi) { - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - int err; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - err = ice_repr_add(vf); - if (err) - goto err; - } - - /* only export if ADQ and DCB disabled */ - if (ice_is_adq_active(pf) || 
ice_is_dcb_active(pf)) - return 0; - - devlink = priv_to_devlink(pf); - ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); - - return 0; - -err: - ice_repr_rem_from_all_vfs(pf); + if (!vsi->vf) + return NULL; - return err; + return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index e1ee2d2c1d2d..f9aede315716 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -13,14 +13,17 @@ struct ice_repr { struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; + int q_id; + u32 id; + u8 parent_mac[ETH_ALEN]; #ifdef CONFIG_ICE_SWITCHDEV /* info about slow path rule */ struct ice_rule_query_data sp_rule; #endif }; -int ice_repr_add_for_all_vfs(struct ice_pf *pf); -void ice_repr_rem_from_all_vfs(struct ice_pf *pf); +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); +void ice_repr_rem_vf(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); @@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); + +struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index e1494f24f661..6d33dd647c78 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -172,13 +172,14 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - mutex_lock(&vfs->table_lock); + ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - ice_eswitch_release(pf); + mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); ice_dis_vf_qs(vf); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { @@ -609,6 +610,14 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } + retval = ice_eswitch_attach(pf, vf); + if (retval) { + dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", + vf->vf_id, retval); + ice_vf_vsi_release(vf); + goto teardown; + } + set_bit(ICE_VF_STATE_INIT, vf->vf_states); ice_ena_vf_mappings(vf); wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); @@ -918,6 +927,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } + ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -927,12 +937,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) clear_bit(ICE_VF_DIS, pf->state); - ret = ice_eswitch_configure(pf); - if (ret) { - dev_err(dev, "Failed to configure eswitch, err %d\n", ret); - goto err_unroll_sriov; - } - /* rearm global interrupts */ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) ice_irq_dynamic_ena(hw, NULL, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index dd03cb69ad26..08d3bbf4b44c 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -653,7 +653,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev, ice_tc_is_dev_uplink(target_dev)) { repr = ice_netdev_to_repr(filter_dev); - fltr->dest_vsi = 
repr->src_vsi->back->switchdev.uplink_vsi; + fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && ice_is_port_repr_netdev(target_dev)) { @@ -765,7 +765,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && - fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { + fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 9e97ea863068..59617f055e35 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -557,13 +557,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action * @rx_buf: Rx buffer to store the XDP action + * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static void ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf) + struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act; @@ -571,6 +572,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; + ice_xdp_meta_set_desc(xdp, eop_desc); + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: @@ -1180,8 +1183,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) struct sk_buff *skb; unsigned int size; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -1241,7 +1243,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); + ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); if (rx_buf->act == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); @@ -1276,7 +1278,7 @@ construct_skb: continue; } - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); + vlan_tci = ice_get_vlan_tci(rx_desc); /* pad the skb if needed, to make a valid ethernet frame */ if (eth_skb_pad(skb)) @@ -1286,14 +1288,11 @@ construct_skb: total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + ice_process_skb_fields(rx_ring, rx_desc, skb); ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); /* send completed skb up the stack */ - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_receive_skb(rx_ring, skb, vlan_tci); /* update budget accounting */ total_rx_pkts++; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index daf7b9dbb143..b3379ff73674 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -257,6 +257,20 @@ enum ice_rx_dtype { ICE_RX_DTYPE_SPLIT_ALWAYS = 2, }; +struct ice_pkt_ctx { + u64 cached_phctime; + __be16 vlan_proto; +}; + +struct ice_xdp_buff { + 
struct xdp_buff xdp_buff; + const union ice_32b_rx_flex_desc *eop_desc; + const struct ice_pkt_ctx *pkt_ctx; +}; + +/* Required for compatibility with xdp_buffs from xsk_pool */ +static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); + /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -298,7 +312,6 @@ enum ice_dynamic_itr { /* descriptor ring, associated with a VSI */ struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ - struct ice_rx_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ @@ -310,13 +323,24 @@ struct ice_rx_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 next_to_alloc; - /* CL2 - 2nd cacheline starts here */ + union { struct ice_rx_buf *rx_buf; struct xdp_buff **xdp_buf; }; - struct xdp_buff xdp; + /* CL2 - 2nd cacheline starts here */ + union { + struct ice_xdp_buff xdp_ext; + struct xdp_buff xdp; + }; /* CL3 - 3rd cacheline starts here */ + union { + struct ice_pkt_ctx pkt_ctx; + struct { + u64 cached_phctime; + __be16 vlan_proto; + }; + }; struct bpf_prog *xdp_prog; u16 rx_offset; @@ -332,9 +356,9 @@ struct ice_rx_ring { /* CL4 - 4th cacheline starts here */ struct ice_channel *ch; struct ice_tx_ring *xdp_ring; + struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; dma_addr_t dma; /* physical address of ring */ - u64 cached_phctime; u16 rx_buf_len; u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 7e06373e14d9..839e5da24ad5 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) } /** - * ice_rx_hash - set the hash value in the skb + * ice_get_rx_hash - get RX hash value from descriptor + * @rx_desc: specific descriptor + * + * Returns hash, if present, 0 otherwise. 
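The static_assert above makes a plain cast between struct xdp_buff * and struct ice_xdp_buff * legal for buffers handed over by the XSK pool. The ice_xdp_meta_set_desc() call introduced in ice_run_xdp() presumably stashes the descriptor through the same containment relationship; a sketch:

static inline void
ice_xdp_meta_set_desc(struct xdp_buff *xdp,
		      union ice_32b_rx_flex_desc *eop_desc)
{
	struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
						    xdp_buff);

	xdp_ext->eop_desc = eop_desc;	/* read later by the hint handlers */
}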
+ */ +static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc) +{ + const struct ice_32b_rx_flex_desc_nic *nic_mdid; + + if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)) + return 0; + + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + return le32_to_cpu(nic_mdid->rss_hash); +} + +/** + * ice_rx_hash_to_skb - set the hash value in the skb * @rx_ring: descriptor ring * @rx_desc: specific descriptor * @skb: pointer to current skb * @rx_ptype: the ptype value from the descriptor */ static void -ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 rx_ptype) +ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u16 rx_ptype) { - struct ice_32b_rx_flex_desc_nic *nic_mdid; u32 hash; if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) return; - if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) - return; - - nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; - hash = le32_to_cpu(nic_mdid->rss_hash); - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + hash = ice_get_rx_hash(rx_desc); + if (likely(hash)) + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); } /** @@ -171,11 +185,38 @@ checksum_fail: } /** + * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb + * @rx_ring: Ring to get the VSI info + * @rx_desc: Receive descriptor + * @skb: Particular skb to send timestamp with + * + * The timestamp is in ns, so we must convert the result first. + */ +static void +ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) +{ + u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns); +} + +/** + * ice_get_ptype - Read HW packet type from the descriptor + * @rx_desc: RX descriptor + */ +static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc) +{ + return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; +} + +/** * ice_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: Rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated - * @ptype: the packet type decoded by hardware * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and @@ -184,9 +225,11 @@ checksum_fail: void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype) + struct sk_buff *skb) { - ice_rx_hash(rx_ring, rx_desc, skb, ptype); + u16 ptype = ice_get_ptype(rx_desc); + + ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_csum(rx_ring, skb, rx_desc, ptype); if (rx_ring->ptp_rx) - ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb); } /** * ice_receive_skb - Send a completed packet up the stack * @rx_ring: Rx ring in play * @skb: packet to send up - * @vlan_tag: VLAN tag for packet + * @vlan_tci: VLAN TCI for packet * * This function sends the completed packet (via. 
skb) up the stack using * gro receive functions (with/without VLAN tag) */ void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci) { - netdev_features_t features = rx_ring->netdev->features; - bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK); - - if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); + if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto) + __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto, + vlan_tci); napi_gro_receive(&rx_ring->q_vector->napi, skb); } @@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, spin_unlock(&xdp_ring->tx_lock); } } + +/** + * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler + * @ctx: XDP buff pointer + * @ts_ns: destination address + * + * Copy HW timestamp (if available) to the destination address. + */ +static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, + xdp_ext->pkt_ctx); + if (!*ts_ns) + return -ENODATA; + + return 0; +} + +/* Define a ptype index -> XDP hash type lookup table. + * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], + * avoiding possible copy-paste errors. + */ +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL + +#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 + +/* A few supplementary definitions for when XDP hash types do not coincide + * with what can be generated from ptype definitions + * by means of preprocessor concatenation. + */ +#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 +#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 + +static const enum xdp_rss_hash_type +ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { + ICE_PTYPES +}; + +#undef XDP_RSS_L3_NONE +#undef XDP_RSS_L4_NONE +#undef XDP_RSS_TYPE_PAY2 +#undef XDP_RSS_TYPE_PAY3 +#undef XDP_RSS_TYPE_PAY4 + +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +/** + * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor + * @eop_desc: End of Packet descriptor + */ +static enum xdp_rss_hash_type +ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) +{ + u16 ptype = ice_get_ptype(eop_desc); + + if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) + return 0; + + return ice_ptype_to_xdp_hash[ptype]; +} + +/** + * ice_xdp_rx_hash - RX hash XDP hint handler + * @ctx: XDP buff pointer + * @hash: hash destination address + * @rss_type: XDP hash type destination address + * + * Copy RX hash (if available) and its type to the destination address. 
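The #undef/#define churn around ICE_PTT above is an X-macro: the single ICE_PTYPES list is expanded once to build ice_decode_rx_desc_ptype[] and once more here to build the XDP hash-type table, so the two tables cannot drift apart. A reduced, self-contained sketch of the technique (the names are illustrative, not the driver's):

/* One master list, written exactly once. */
#define PT_LIST(X)		\
	X(0, NONE, NONE)	\
	X(1, IPV4, TCP)		\
	X(2, IPV6, UDP)

/* First expansion: a human-readable decode table. */
#define AS_NAME(idx, l3, l4) [idx] = #l3 "/" #l4,
static const char *const pt_name[] = { PT_LIST(AS_NAME) };
#undef AS_NAME

enum { HASH_NONE = 0, HASH_IPV4 = 1, HASH_IPV6 = 2, HASH_TCP = 4, HASH_UDP = 8 };

/* Second expansion: a hash-type table derived from the same list. */
#define AS_HASH(idx, l3, l4) [idx] = HASH_##l3 | HASH_##l4,
static const int pt_hash[] = { PT_LIST(AS_HASH) };
#undef AS_HASH

The supplementary XDP_RSS_* defines in the hunk play the same role as HASH_NONE here: aliases for the few cases where mechanical token pasting would otherwise name something that does not exist.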
+ */ +static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *hash = ice_get_rx_hash(xdp_ext->eop_desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + if (!likely(*hash)) + return -ENODATA; + + return 0; +} + +/** + * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler + * @ctx: XDP buff pointer + * @vlan_proto: destination address for VLAN protocol + * @vlan_tci: destination address for VLAN TCI + * + * Copy VLAN tag (if was stripped) and corresponding protocol + * to the destination address. + */ +static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, + u16 *vlan_tci) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + if (!*vlan_proto) + return -ENODATA; + + *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + if (!*vlan_tci) + return -ENODATA; + + return 0; +} + +const struct xdp_metadata_ops ice_xdp_md_ops = { + .xmo_rx_timestamp = ice_xdp_rx_hw_ts, + .xmo_rx_hash = ice_xdp_rx_hash, + .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag, +}; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 115969ecdf7b..762047508619 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -84,7 +84,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) } /** - * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor + * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor * @rx_desc: Rx 32b flex descriptor with RXDID=2 * * The OS and current PF implementation only support stripping a single VLAN tag @@ -92,7 +92,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) * one is found return the tag, else return 0 to mean no VLAN tag was found. 
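With ice_xdp_md_ops registered, XDP programs reach these handlers through the generic metadata kfuncs. A hedged sketch of the consumer side — it assumes the kernel's standard bpf_xdp_metadata_rx_*() kfunc declarations and a vmlinux.h generated for a kernel that has them; it is not part of this patch:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u32 hash;

	/* Each kfunc returns 0 on success or -ENODATA, mirroring the
	 * driver-side handlers above.
	 */
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("hash %u type %u", hash, rss_type);
	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan tci %u", vlan_tci);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";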
*/ static inline u16 -ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc) +ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc) { u16 stat_err_bits; @@ -148,7 +148,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype); + struct sk_buff *skb); void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); + +static inline void +ice_xdp_meta_set_desc(struct xdp_buff *xdp, + union ice_32b_rx_flex_desc *eop_desc) +{ + struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, + xdp_buff); + + xdp_ext->eop_desc = eop_desc; +} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a18ca0ff879f..6df7c4487ad0 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -17,6 +17,7 @@ #include "ice_protocol_type.h" #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" +#include "ice_fwlog.h" static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -246,6 +247,7 @@ struct ice_fd_hw_prof { int cnt; u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; + u64 prof_id[ICE_FD_HW_SEG_MAX]; }; /* Common HW capabilities for SW use */ @@ -377,6 +379,8 @@ struct ice_hw_func_caps { struct ice_ts_func_info ts_func_info; }; +#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0 + /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; @@ -385,6 +389,11 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + /* bitmap of supported sensors + * bit 0 - internal temperature sensor + * bit 31:1 - Reserved + */ + u32 supported_sensors; }; /* MAC info */ @@ -730,24 +739,6 @@ struct ice_switch_info { DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; -/* FW logging configuration */ -struct ice_fw_log_evnt { - u8 cfg : 4; /* New event enables to configure */ - u8 cur : 4; /* Current/active event enables */ -}; - -struct ice_fw_log_cfg { - u8 cq_en : 1; /* FW logging is enabled via the control queue */ - u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ - u8 actv_evnts; /* Cumulation of currently enabled log events */ - -#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) - struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; -}; - /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. The snapshot can be in * states: @@ -827,7 +818,7 @@ struct ice_mbx_data { enum ice_phy_model { ICE_PHY_UNSUP = -1, ICE_PHY_E810 = 1, - ICE_PHY_E822, + ICE_PHY_E82X, }; /* Port hardware description */ @@ -889,7 +880,9 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ - struct ice_fw_log_cfg fw_log; + struct ice_fwlog_cfg fwlog_cfg; + bool fwlog_supported; /* does hardware support FW logging? 
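The supported_sensors field added to ice_hw_dev_caps above is a plain capability bitmap with only bit 0 (E810 internal temperature) defined so far. Consuming it is the usual bit test — a hypothetical sketch, not driver code:

#include <stdbool.h>
#include <stdint.h>

#define SENSOR_E810_INT_TEMP_BIT 0	/* mirrors ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT */

static bool temp_sensor_supported(uint32_t supported_sensors)
{
	/* Bits 31:1 are reserved and must read as zero for now. */
	return supported_sensors & (UINT32_C(1) << SENSOR_E810_INT_TEMP_BIT);
}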
*/ + struct ice_fwlog_ring fwlog_ring; /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL * register. Used for determining the ITR/INTRL granularity during @@ -910,10 +903,9 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_PHY_PER_NAC_E822 1 #define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E822 2 -#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_QUADS_PER_PHY_E82X 2 +#define ICE_PORTS_PER_PHY_E82X 8 #define ICE_PORTS_PER_QUAD 4 #define ICE_PORTS_PER_PHY_E810 4 #define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index b7ae09952156..d6f74513b495 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -760,6 +760,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -775,13 +776,11 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + ice_eswitch_attach(pf, vf); + mutex_unlock(&vf->cfg_lock); } - if (ice_is_eswitch_mode_switchdev(pf)) - if (ice_eswitch_rebuild(pf)) - dev_warn(dev, "eswitch rebuild failed\n"); - ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -943,7 +942,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) goto out_unlock; } - ice_eswitch_update_repr(vsi); + ice_eswitch_update_repr(vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ ice_mbx_clear_malvf(&vf->mbx_info); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 93c774f2f437..35866553f288 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -130,7 +130,7 @@ struct ice_vf { struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); - struct ice_repr *repr; + unsigned long repr_id; const struct ice_virtchnl_ops *virtchnl_ops; const struct ice_vf_ops *vf_ops; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1c7b4ded948b..d4ad0739b57b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -689,9 +689,7 @@ out: * a specific virtchnl RSS cfg * @hw: pointer to the hardware * @rss_cfg: pointer to the virtchnl RSS cfg - * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) - * to configure - * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure + * @hash_cfg: pointer to the HW hash configuration * * Return true if all the protocol header and hash fields in the RSS cfg could * be parsed, else return false @@ -699,13 +697,23 @@ out: * This function parses the virtchnl RSS cfg to be the intended * hash fields and the intended header for RSS configuration */ -static bool -ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, - u32 *addl_hdrs, u64 *hash_flds) +static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, + struct virtchnl_rss_cfg *rss_cfg, + struct ice_rss_hash_cfg *hash_cfg) { const struct ice_vc_hash_field_match_type *hf_list; const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; + u32 *addl_hdrs = &hash_cfg->addl_hdrs; + u64 *hash_flds = &hash_cfg->hash_flds; + + /* set outer layer RSS as default */ + hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS; + + 
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hash_cfg->symm = true; + else + hash_cfg->symm = false; hf_list = ice_vc_hash_field_list; hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); @@ -823,8 +831,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) int status; lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : - ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -832,11 +840,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } - ctx->info.q_opt_rss = ((lut_type << - ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - (hash_type & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + ctx->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); /* Preserve existing queueing option setting */ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & @@ -858,18 +864,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) kfree(ctx); } else { - u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; - u64 hash_flds = ICE_HASH_INVALID; + struct ice_rss_hash_cfg cfg; - if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, - &hash_flds)) { + /* Only check for none raw pattern case */ + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + cfg.hash_flds = ICE_HASH_INVALID; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; + + if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add) { - if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs)) { + if (ice_add_rss_cfg(hw, vsi, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", vsi->vsi_num, v_ret); @@ -877,8 +889,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } else { int status; - status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs); + status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); /* We just ignore -ENOENT, because if two configurations * share the same profile remove one of them actually * removes both, since the profile is deleted. 
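The q_opt_rss conversion above is typical of the FIELD_PREP() cleanups in this series: the shift is derived from the mask at compile time instead of being maintained by hand next to it. A stand-alone illustration of the equivalence — a simplified userspace reimplementation with a hypothetical mask; the kernel's real macros live in <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's helpers. */
#define BF_SHIFT(mask)		__builtin_ctz(mask)
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << BF_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint32_t)(reg) & (mask)) >> BF_SHIFT(mask))

#define Q_OPT_RSS_LUT_M	0x0000000C	/* hypothetical 2-bit field */

int main(void)
{
	uint32_t reg = FIELD_PREP(Q_OPT_RSS_LUT_M, 2);

	/* Same result as (2 << 2) & 0xC, minus the hand-kept shift. */
	printf("reg=%#x field=%u\n", reg, FIELD_GET(Q_OPT_RSS_LUT_M, reg));
	return 0;
}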
@@ -989,6 +1000,51 @@ error_param: } /** + * ice_vc_config_rss_hfunc + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS Hash function + */ +static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + if (ice_set_rss_hfunc(vsi, hfunc)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret, + NULL, 0); +} + +/** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2634,7 +2690,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) } if (vrh->hena) { - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena); v_ret = ice_err_to_virt_err(status); } @@ -3755,6 +3811,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -3884,6 +3941,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -4066,6 +4124,9 @@ error_handler: case VIRTCHNL_OP_CONFIG_RSS_LUT: err = ops->config_rss_lut(vf, msg); break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + err = ops->config_rss_hfunc(vf, msg); + break; case VIRTCHNL_OP_GET_STATS: err = ops->get_stats_msg(vf, msg); break; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index cd747718de73..60dfbe05980a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -32,6 +32,7 @@ struct ice_virtchnl_ops { int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); int (*config_rss_key)(struct ice_vf *vf, u8 *msg); int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg); int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 7d547fa616fa..5e19d48a05b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ 
b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -68,6 +68,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = { static const u32 rss_pf_allowlist_opcodes[] = { VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT, VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA, + VIRTCHNL_OP_CONFIG_RSS_HFUNC, }; /* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 24b23b7ef04a..9ee7ab207b37 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -10,19 +10,6 @@ #define to_fltr_conf_from_desc(p) \ container_of(p, struct virtchnl_fdir_fltr_conf, input) -#define ICE_FLOW_PROF_TYPE_S 0 -#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S) -#define ICE_FLOW_PROF_VSI_S 32 -#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S) - -/* Flow profile ID format: - * [0:31] - flow type, flow + tun_offs - * [32:63] - VSI index - */ -#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ - ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \ - (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M))) - #define GTPU_TEID_OFFSET 4 #define GTPU_EH_QFI_OFFSET 1 #define GTPU_EH_QFI_MASK 0x3F @@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) return; vf_prof = fdir->fdir_prof[flow]; + prof_id = vf_prof->prof_id[tun]; vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { @@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) if (!fdir->prof_entry_cnt[flow][tun]) return; - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, - flow, tun ? ICE_FLTR_PTYPE_MAX : 0); - for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) if (vf_prof->entry_h[i][tun]) { u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); @@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_hw *hw; u64 entry1_h = 0; u64 entry2_h = 0; - u64 prof_id; int ret; pf = vf->pf; @@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, ice_vc_fdir_rem_prof(vf, flow, tun); } - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, - tun ? 
ICE_FLTR_PTYPE_MAX : 0); - - ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + tun + 1, false, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (ret) { @@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, goto err_prof; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (ret) { @@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, vf_prof->cnt++; fdir->prof_entry_cnt[flow][tun]++; + vf_prof->prof_id[tun] = prof->id; + return 0; err_entry_1: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); err_exit: return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 99954508184f..5d1ae8e4058a 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -458,6 +458,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; + /* Put private info that changes on a per-packet basis + * into xdp_buff_xsk->cb. 
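As the new comment above says, the zero-copy path cannot wrap struct xdp_buff the way the regular path does — those buffers belong to the XSK pool — so the per-packet descriptor pointer has to live in the cb[] scratch area reserved behind each pool buffer. A sketch of the constraint with stand-in sizes (the kernel enforces this with the XSK_CHECK_PRIV_TYPE() assertion):

#include <assert.h>

#define CB_SIZE 24			/* stand-in for the XSK cb[] area */

struct xsk_buff_stub {
	char bookkeeping[16];		/* pool-owned state */
	char cb[CB_SIZE];		/* driver-private scratch space */
};

struct drv_priv {			/* what the driver tucks into cb[] */
	const void *eop_desc;
	const void *pkt_ctx;
};

/* If the private struct outgrew cb[], the pool would overwrite it. */
static_assert(sizeof(struct drv_priv) <= CB_SIZE,
	      "private data too large for cb[]");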
+ */ + ice_xdp_meta_set_desc(*xdp, rx_desc); + rx_desc++; xdp++; } @@ -863,8 +868,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) struct xdp_buff *xdp; struct sk_buff *skb; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -942,13 +946,10 @@ construct_skb: total_rx_bytes += skb->len; total_rx_packets++; - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); - - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; + vlan_tci = ice_get_vlan_tci(rx_desc); - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_process_skb_fields(rx_ring, rx_desc, skb); + ice_receive_skb(rx_ring, skb, vlan_tci); } rx_ring->next_to_clean = ntc; diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index bee73353b56a..0acc125decb3 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -15,7 +15,7 @@ struct idpf_vport_max_q; #include <linux/pci.h> #include <linux/bitfield.h> #include <linux/sctp.h> -#include <linux/ethtool.h> +#include <linux/ethtool_netlink.h> #include <net/gro.h> #include <linux/dim.h> @@ -418,11 +418,13 @@ struct idpf_vport { /** * enum idpf_user_flags + * @__IDPF_USER_FLAG_HSPLIT: header split state * @__IDPF_PROMISC_UC: Unicast promiscuous mode * @__IDPF_PROMISC_MC: Multicast promiscuous mode * @__IDPF_USER_FLAGS_NBITS: Must be last */ enum idpf_user_flags { + __IDPF_USER_FLAG_HSPLIT = 0U, __IDPF_PROMISC_UC = 32, __IDPF_PROMISC_MC, @@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); +u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); +bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); + #endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 52ea38669f85..986d429d1175 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) /** * idpf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use + * @rxfh: pointer to param struct (indir, key, hfunc) * * Reads the indirection table directly from the hardware. Always returns 0. 
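idpf_get_rxfh() is converted here to the same struct ethtool_rxfh_param interface rolled out to every driver in this series: the old (indir, key, hfunc) argument triplet becomes one parameter struct, and setters additionally receive a netlink extack. A condensed kernel-style stub showing the new handler shape (illustrative fields only, not a real driver):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int stub_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	rxfh->hfunc = ETH_RSS_HASH_TOP;		/* always report the hash function */

	if (rxfh->indir)			/* NULL when the core didn't ask */
		rxfh->indir[0] = 0;		/* a real driver copies its table */
	if (rxfh->key)
		memset(rxfh->key, 0, 4);	/* likewise for the hash key */

	return 0;
}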
*/ -static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int idpf_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_rss_data *rss_data; @@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (np->state != __IDPF_VPORT_UP) goto unlock_mutex; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (key) - memcpy(key, rss_data->rss_key, rss_data->rss_key_size); + if (rxfh->key) + memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size); - if (indir) { + if (rxfh->indir) { for (i = 0; i < rss_data->rss_lut_size; i++) - indir[i] = rss_data->rss_lut[i]; + rxfh->indir[i] = rss_data->rss_lut[i]; } unlock_mutex: @@ -123,15 +120,15 @@ unlock_mutex: /** * idpf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. */ -static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int idpf_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_rss_data *rss_data; @@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, if (np->state != __IDPF_VPORT_UP) goto unlock_mutex; - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) { err = -EOPNOTSUPP; goto unlock_mutex; } - if (key) - memcpy(rss_data->rss_key, key, rss_data->rss_key_size); + if (rxfh->key) + memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size); - if (indir) { + if (rxfh->indir) { for (lut = 0; lut < rss_data->rss_lut_size; lut++) - rss_data->rss_lut[lut] = indir[lut]; + rss_data->rss_lut[lut] = rxfh->indir[lut]; } err = idpf_config_rss(vport); @@ -320,6 +318,8 @@ static void idpf_get_ringparam(struct net_device *netdev, ring->rx_pending = vport->rxq_desc_count; ring->tx_pending = vport->txq_desc_count; + kring->tcp_data_split = idpf_vport_get_hsplit(vport); + idpf_vport_ctrl_unlock(netdev); } @@ -379,6 +379,14 @@ static int idpf_set_ringparam(struct net_device *netdev, new_rx_count == vport->rxq_desc_count) goto unlock_mutex; + if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { + NL_SET_ERR_MSG_MOD(ext_ack, + "setting TCP data split is not supported"); + err = -EOPNOTSUPP; + + goto unlock_mutex; + } + config_data = &vport->adapter->vport_config[idx]->user_config; config_data->num_req_txq_desc = new_tx_count; config_data->num_req_rxq_desc = new_rx_count; @@ -532,7 +540,7 @@ static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, unsigned int i; for (i = 0; i < size; i++) - ethtool_sprintf(p, "%s", stats[i].stat_string); + ethtool_puts(p, stats[i].stat_string); } /** @@ -1334,6 +1342,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops idpf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_msglevel = idpf_get_msglevel, 
.set_msglevel = idpf_set_msglevel, .get_link = ethtool_op_get_link, diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 19809b0ddcd9..5fea2fd957eb 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1058,6 +1058,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) } /** + * idpf_is_hsplit_supported - check whether the header split is supported + * @vport: virtual port to check the capability for + * + * Return: true if it's supported by the HW/FW, false if not. + */ +static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) +{ + return idpf_is_queue_model_split(vport->rxq_model) && + idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT); +} + +/** + * idpf_vport_get_hsplit - get the current header split feature state + * @vport: virtual port to query the state for + * + * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported, + * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled, + * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active. + */ +u8 idpf_vport_get_hsplit(const struct idpf_vport *vport) +{ + const struct idpf_vport_user_config_data *config; + + if (!idpf_is_hsplit_supported(vport)) + return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; + + config = &vport->adapter->vport_config[vport->idx]->user_config; + + return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ? + ETHTOOL_TCP_DATA_SPLIT_ENABLED : + ETHTOOL_TCP_DATA_SPLIT_DISABLED; +} + +/** + * idpf_vport_set_hsplit - enable or disable header split on a given vport + * @vport: virtual port to configure + * @val: Ethtool flag controlling the header split state + * + * Return: true on success, false if not supported by the HW. + */ +bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val) +{ + struct idpf_vport_user_config_data *config; + + if (!idpf_is_hsplit_supported(vport)) + return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; + + config = &vport->adapter->vport_config[vport->idx]->user_config; + + switch (val) { + case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN: + /* Default is to enable */ + case ETHTOOL_TCP_DATA_SPLIT_ENABLED: + __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); + return true; + case ETHTOOL_TCP_DATA_SPLIT_DISABLED: + __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); + return true; + default: + return false; + } +} + +/** * idpf_vport_alloc - Allocates the next available struct vport in the adapter * @adapter: board private structure * @max_q: vport max queue info diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1f728a9004d9..a005626129a5 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_adapter *adapter = vport->adapter; struct idpf_queue *q; int i, k, err = 0; + bool hs; vport->rxq_grps = kcalloc(vport->num_rxq_grp, sizeof(struct idpf_rxq_group), GFP_KERNEL); if (!vport->rxq_grps) return -ENOMEM; + hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; int j; @@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - 
IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + + if (hs) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } @@ -1344,9 +1346,7 @@ skip_splitq_rx_init: rx_qgrp->splitq.rxq_sets[j]->refillq1 = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, - IDPF_CAP_HSPLIT) && - idpf_is_queue_model_split(vport->rxq_model)) { + if (hs) { q->rx_hsplit_en = true; q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 2c1b051fdc0d..d0cdd63b3d5b 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3285,6 +3285,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); + idpf_vport_init_num_qs(vport, vport_msg); idpf_vport_calc_num_q_desc(vport); idpf_vport_calc_num_q_groups(vport); diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index caf91c6f52b4..5a23b9cfec6c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2007 - 2018 Intel Corporation. */ +#include <linux/bitfield.h> #include <linux/if_ether.h> #include <linux/delay.h> #include <linux/pci.h> @@ -50,9 +51,8 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw) break; } - bus->width = (enum e1000_bus_width)((pcie_link_status & - PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT); + bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW, + pcie_link_status); } reg = rd32(E1000_STATUS); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 16d2a55d5e17..d868a70732f4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2356,11 +2356,9 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igb_gstrings_stats[i].stat_string); + ethtool_puts(&p, igb_gstrings_stats[i].stat_string); for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igb_gstrings_net_stats[i].stat_string); + ethtool_puts(&p, igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -3282,18 +3280,17 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int igb_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct igb_adapter *adapter = netdev_priv(netdev); int i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (!rxfh->indir) return 0; for (i = 0; i < IGB_RETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; + rxfh->indir[i] = adapter->rss_indir_tbl[i]; return 0; } @@ -3333,8 +3330,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } } -static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int igb_set_rxfh(struct 
net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3342,10 +3340,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, u32 num_queues; /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + if (rxfh->key || + (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; - if (!indir) + if (!rxfh->indir) return 0; num_queues = adapter->rss_queues; @@ -3362,12 +3361,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, /* Verify user input. */ for (i = 0; i < IGB_RETA_SIZE; i++) - if (indir[i] >= num_queues) + if (rxfh->indir[i] >= num_queues) return -EINVAL; for (i = 0; i < IGB_RETA_SIZE; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; igb_write_rss_indir_tbl(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index f48f82d5e274..ac7c861e83a0 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -81,6 +81,21 @@ struct igc_tx_timestamp_request { u32 flags; /* flags that should be added to the tx_buffer */ }; +struct igc_inline_rx_tstamps { + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. + * + */ + __le32 timer1[2]; + __le32 timer0[2]; +}; + struct igc_ring_container { struct igc_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -261,6 +276,8 @@ struct igc_adapter { unsigned int ptp_flags; /* System time value lock */ spinlock_t tmreg_lock; + /* Free-running timer lock */ + spinlock_t free_timer_lock; struct cyclecounter cc; struct timecounter tc; struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ @@ -469,6 +486,8 @@ enum igc_tx_flags { IGC_TX_FLAGS_TSTAMP_1 = 0x100, IGC_TX_FLAGS_TSTAMP_2 = 0x200, IGC_TX_FLAGS_TSTAMP_3 = 0x400, + + IGC_TX_FLAGS_TSTAMP_TIMER_1 = 0x800, }; enum igc_boards { @@ -531,7 +550,7 @@ struct igc_rx_buffer { struct igc_xdp_buff { struct xdp_buff xdp; union igc_adv_rx_desc *rx_desc; - ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ + struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ }; struct igc_q_vector { diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index f7d6491d4c60..bf8cdfbba9ff 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -37,6 +37,10 @@ struct igc_adv_tx_context_desc { #define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */ #define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */ #define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_1 0x00010000 /* Select timer 1 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_2 0x00020000 /* Select timer 2 for timestamp */ +#define IGC_ADVTXD_TSTAMP_TIMER_3 0x00030000 /* Select timer 3 for timestamp */ + #define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ #define IGC_ADVTXD_DTYP_DATA 
0x00300000 /* Advanced Data Descriptor */ #define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index b3037016f31d..5f92b3c7c3d4 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -317,6 +317,8 @@ #define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ +#define IGC_TXD_PTP2_TIMER_1 0x00000020 + /* IPSec Encrypt Enable */ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 785eaa8e0ba8..684f6f2a0572 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -773,11 +773,9 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, break; case ETH_SS_STATS: for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igc_gstrings_stats[i].stat_string); + ethtool_puts(&p, igc_gstrings_stats[i].stat_string); for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - igc_gstrings_net_stats[i].stat_string); + ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -1428,45 +1426,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) return IGC_RETA_SIZE; } -static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int igc_ethtool_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct igc_adapter *adapter = netdev_priv(netdev); int i; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (!rxfh->indir) return 0; for (i = 0; i < IGC_RETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; + rxfh->indir[i] = adapter->rss_indir_tbl[i]; return 0; } -static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int igc_ethtool_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); u32 num_queues; int i; /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + if (rxfh->key || + (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; - if (!indir) + if (!rxfh->indir) return 0; num_queues = adapter->rss_queues; /* Verify user input. 
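The recurring ethtool_sprintf(&p, "%s", s) to ethtool_puts(&p, s) substitutions in these stat-string loops swap a printf-style pass for a plain copy-and-advance of one fixed-width string slot. A rough userspace approximation of what the helper does (a sketch, not the kernel implementation):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* Emit one fixed-width string slot and advance the cursor past it,
 * with no format-string processing involved.
 */
static void stub_puts(char **data, const char *str)
{
	snprintf(*data, ETH_GSTRING_LEN, "%s", str);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char buf[2 * ETH_GSTRING_LEN] = { 0 };
	char *p = buf;

	stub_puts(&p, "rx_packets");
	stub_puts(&p, "tx_packets");
	printf("%s %s\n", buf, buf + ETH_GSTRING_LEN);
	return 0;
}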
*/ for (i = 0; i < IGC_RETA_SIZE; i++) - if (indir[i] >= num_queues) + if (rxfh->indir[i] >= num_queues) return -EINVAL; for (i = 0; i < IGC_RETA_SIZE; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; igc_write_rss_indir_tbl(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e9bb403bbacf..61db1d3bfa0b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1299,14 +1299,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring, u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; /* insert L4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * - ((IGC_TXD_POPTS_TXSM << 8) / - IGC_TX_FLAGS_CSUM); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM, + (IGC_TXD_POPTS_TXSM << 8)); /* insert IPv4 checksum */ - olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * - (((IGC_TXD_POPTS_IXSM << 8)) / - IGC_TX_FLAGS_IPV4); + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4, + (IGC_TXD_POPTS_IXSM << 8)); + + /* Use the second timer (free running, in general) for the timestamp */ + olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1, + IGC_TXD_PTP2_TIMER_1); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } @@ -1651,6 +1653,8 @@ done: if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) + tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; } else { adapter->tx_hwtstamp_skipped++; } @@ -1963,9 +1967,9 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); @@ -1982,8 +1986,10 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, if (unlikely(!skb)) return NULL; - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } /* Determine available headroom for copy */ headlen = size; @@ -2583,11 +2589,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) int xdp_status = 0, rx_buffer_pgcnt; while (likely(total_packets < budget)) { - union igc_adv_rx_desc *rx_desc; + struct igc_xdp_buff ctx = { .rx_ts = NULL }; struct igc_rx_buffer *rx_buffer; + union igc_adv_rx_desc *rx_desc; unsigned int size, truesize; - struct igc_xdp_buff ctx; - ktime_t timestamp = 0; int pkt_offset = 0; void *pktbuf; @@ -2614,9 +2619,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - pktbuf); - ctx.rx_ts = timestamp; + ctx.rx_ts = pktbuf; pkt_offset = IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; } @@ -2653,8 +2656,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) else if (ring_uses_build_skb(rx_ring)) skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); else - skb = igc_construct_skb(rx_ring, 
rx_buffer, &ctx.xdp, - timestamp); + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -2803,9 +2805,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) ctx->rx_desc = desc; if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { - timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, - bi->xdp->data); - ctx->rx_ts = timestamp; + ctx->rx_ts = bi->xdp->data; bi->xdp->data += IGC_TS_HDR_LEN; @@ -6562,6 +6562,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return 0; } +static ktime_t igc_get_tstamp(struct net_device *dev, + const struct skb_shared_hwtstamps *hwtstamps, + bool cycles) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_inline_rx_tstamps *tstamp; + ktime_t timestamp; + + tstamp = hwtstamps->netdev_data; + + if (cycles) + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); + else + timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); + + return timestamp; +} + static const struct net_device_ops igc_netdev_ops = { .ndo_open = igc_open, .ndo_stop = igc_close, @@ -6579,6 +6597,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, .ndo_xsk_wakeup = igc_xsk_wakeup, + .ndo_get_tstamp = igc_get_tstamp, }; /* PCIe configuration access */ @@ -6682,9 +6701,11 @@ static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct igc_xdp_buff *ctx = (void *)_ctx; + struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); + struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { - *timestamp = ctx->rx_ts; + *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 928f38792203..885faaa7b9de 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -459,12 +459,10 @@ static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, /** * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer * @adapter: Pointer to adapter the packet buffer belongs to - * @buf: Pointer to packet buffer + * @buf: Pointer to start of timestamp in HW format (2 32-bit words) * - * This function retrieves the timestamp saved in the beginning of packet - * buffer. While two timestamps are available, one in timer0 reference and the - * other in timer1 reference, this function considers only the timestamp in - * timer0 reference. + * This function retrieves and converts the timestamp stored at @buf + * to ktime_t, adjusting for hardware latencies. * * Returns timestamp value. */ @@ -474,17 +472,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) u32 secs, nsecs; int adjust; - /* Timestamps are saved in little endian at the beginning of the packet - * buffer following the layout: - * - * DWORD: | 0 | 1 | 2 | 3 | - * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | - * - * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds - * part of the timestamp. 
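Given the DWORD layout quoted above — Timer1 SYSTIML/SYSTIMH followed by Timer0 SYSTIML/SYSTIMH — the refactor stops converting the timestamp at receive time and instead stores a pointer to the 16-byte prefix, so igc_ptp_rx_pktstamp() can later be pointed at timer0 (adjustable clock) or timer1 (free-running clock) on demand. A tiny host-side parse of such a prefix (illustrative; real driver code must keep the le32_to_cpu() conversions):

#include <stdint.h>
#include <stdio.h>

struct inline_ts {		/* mirrors struct igc_inline_rx_tstamps */
	uint32_t timer1[2];	/* SYSTIML (ns), SYSTIMH (s), little endian */
	uint32_t timer0[2];
};

static uint64_t to_ns(const uint32_t *timer)
{
	/* timer[0] = nanoseconds part, timer[1] = seconds part */
	return (uint64_t)timer[1] * 1000000000ULL + timer[0];
}

int main(void)
{
	struct inline_ts ts = { .timer0 = { 500, 2 }, .timer1 = { 1, 2 } };

	printf("timer0 = %llu ns\n", (unsigned long long)to_ns(ts.timer0));
	return 0;
}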
- */ - nsecs = le32_to_cpu(buf[2]); - secs = le32_to_cpu(buf[3]); + nsecs = le32_to_cpu(buf[0]); + secs = le32_to_cpu(buf[1]); timestamp = ktime_set(secs, nsecs); @@ -542,10 +531,11 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { val = rd32(IGC_SRRCTL(i)); - /* FIXME: For now, only support retrieving RX timestamps from - * timer 0. + /* Enable retrieving timestamps from timer 0, the + * "adjustable clock" and timer 1 the "free running + * clock". */ - val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + val |= IGC_SRRCTL_TIMER1SEL(1) | IGC_SRRCTL_TIMER0SEL(0) | IGC_SRRCTL_TIMESTAMP; wr32(IGC_SRRCTL(i), val); } @@ -1035,6 +1025,26 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, adapter, &adapter->snapshot, cts); } +static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->free_timer_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML_1); + ts->tv_sec = rd32(IGC_SYSTIMH_1); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->free_timer_lock, flags); + + return 0; +} + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@ -1088,6 +1098,7 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.getcyclesx64 = igc_ptp_getcyclesx64; adapter->ptp_caps.settime64 = igc_ptp_settime_i225; adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; adapter->ptp_caps.pps = 1; @@ -1108,6 +1119,7 @@ void igc_ptp_init(struct igc_adapter *adapter) } spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->free_timer_lock); spin_lock_init(&adapter->tmreg_lock); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 20e17f5fbce3..d38c87d7e5e8 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -243,6 +243,11 @@ #define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ #define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define IGC_SYSTIML_1 0x0B688 /* System time register Low - RO (timer 1) */ +#define IGC_SYSTIMH_1 0x0B68C /* System time register High - RO (timer 1) */ +#define IGC_SYSTIMR_1 0x0B684 /* System time register Residue (timer 1) */ +#define IGC_TIMINCA_1 0x0B690 /* Increment attributes register - RW (timer 1) */ + /* TX Timestamp Low */ #define IGC_TXSTMPL_0 0x0B618 #define IGC_TXSTMPL_1 0x0B698 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4dd897806fa5..af55f19d5bba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1413,12 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < IXGBE_TEST_LEN; i++) - ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]); + ethtool_puts(&p, ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ixgbe_gstrings_stats[i].stat_string); + 
ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -3108,35 +3107,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) indir[i] = adapter->rss_indir_tbl[i] & rss_m; } -static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int ixgbe_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; - if (indir) - ixgbe_get_reta(adapter, indir); + if (rxfh->indir) + ixgbe_get_reta(adapter, rxfh->indir); - if (key) - memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + if (rxfh->key) + memcpy(rxfh->key, adapter->rss_key, + ixgbe_get_rxfh_key_size(netdev)); return 0; } -static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int ixgbe_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; /* Fill out the redirection table */ - if (indir) { + if (rxfh->indir) { int max_queues = min_t(int, adapter->num_rx_queues, ixgbe_rss_indir_tbl_max(adapter)); @@ -3147,18 +3148,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, /* Verify user input. */ for (i = 0; i < reta_entries; i++) - if (indir[i] >= max_queues) + if (rxfh->indir[i] >= max_queues) return -EINVAL; for (i = 0; i < reta_entries; i++) - adapter->rss_indir_tbl[i] = indir[i]; + adapter->rss_indir_tbl[i] = rxfh->indir[i]; ixgbe_store_reta(adapter); } /* Fill out the rss hash key */ - if (key) { - memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + if (rxfh->key) { + memcpy(adapter->rss_key, rxfh->key, + ixgbe_get_rxfh_key_size(netdev)); ixgbe_store_key(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 296915414a7c..7ac53171b041 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) return IXGBEVF_RSS_HASH_KEY_SIZE; } -static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int ixgbevf_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); int err = 0; - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { - if (key) - memcpy(key, adapter->rss_key, + if (rxfh->key) + memcpy(rxfh->key, adapter->rss_key, ixgbevf_get_rxfh_key_size(netdev)); - if (indir) { + if (rxfh->indir) { int i; for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) - indir[i] = adapter->rss_indir_tbl[i]; + rxfh->indir[i] = adapter->rss_indir_tbl[i]; } } else { /* If neither indirection table nor hash key was requested * - just return a success avoiding taking any locks. 
 */ - if (!indir && !key) + if (!rxfh->indir && !rxfh->key) return 0; spin_lock_bh(&adapter->mbx_lock); - if (indir) - err = ixgbevf_get_reta_locked(&adapter->hw, indir, + if (rxfh->indir) + err = ixgbevf_get_reta_locked(&adapter->hw, + rxfh->indir, adapter->num_rx_queues); - if (!err && key) - err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + if (!err && rxfh->key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, + rxfh->key); spin_unlock_bh(&adapter->mbx_lock); }