Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 72
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 124
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 938
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 89
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 818
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 538
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 2620
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 76
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 3654
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 379
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 1686
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 111
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 2675
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 173
27 files changed, 10340 insertions(+), 4012 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4058673fd853..e5d6f684437e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,5 +13,7 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_lib.o \
ice_txrx.o \
ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 868f4a1d0f72..4c4b5717a627 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
@@ -35,17 +36,20 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_DFLT_NUM_DESC 128
-#define ICE_MIN_NUM_DESC 8
-#define ICE_MAX_NUM_DESC 8160
#define ICE_REQ_DESC_MULTIPLE 32
+#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
+#define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
+#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
@@ -62,6 +66,15 @@ extern const char ice_drv_ver[];
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
+#define ICE_INVAL_VFID 256
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -122,7 +135,8 @@ struct ice_sw {
enum ice_state {
__ICE_DOWN,
__ICE_NEEDS_RESTART,
- __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */
+ __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
+ __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -131,10 +145,24 @@ enum ice_state {
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
+ /* When checking for the PF to be in a nominal operating state, the
+ * bits that are grouped at the beginning of the list need to be
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
+ * operating state, it must be added before
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * without appropriate consideration.
+ */
+ __ICE_STATE_NOMINAL_CHECK_BITS,
__ICE_ADMINQ_EVENT_PENDING,
+ __ICE_MAILBOXQ_EVENT_PENDING,
+ __ICE_MDD_EVENT_PENDING,
+ __ICE_VFLR_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
+ __ICE_VF_DIS,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
+ __ICE_SERVICE_DIS,
__ICE_STATE_NBITS /* must be last */
};
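For illustration only (not part of this patch), a minimal sketch of the kind of nominal-state check the __ICE_STATE_NOMINAL_CHECK_BITS sentinel enables, built on the standard kernel bitmap helpers; the function name here is hypothetical:

static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	/* only the state bits grouped before the sentinel are considered */
	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	return !bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS);
}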
@@ -168,7 +196,8 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
- int base_vector;
+ int sw_base_vector; /* Irq base for OS reserved vectors */
+ int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -176,6 +205,8 @@ struct ice_vsi {
/* Interrupt thresholds */
u16 work_lmt;
+ s16 vf_id; /* VF ID for SR-IOV VSIs */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -225,21 +256,39 @@ struct ice_q_vector {
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
+ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
+ * value to the device
+ */
+ u8 intrl;
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
+ ICE_FLAG_SRIOV_ENA,
+ ICE_FLAG_SRIOV_CAPABLE,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
+
+ /* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *irq_tracker;
+ struct ice_res_tracker *sw_irq_tracker;
+
+ /* HW reserved Interrupts for this PF */
+ struct ice_res_tracker *hw_irq_tracker;
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ /* Virtchnl/SR-IOV config info */
+ struct ice_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_vfs_supported; /* num VFs supported for this PF */
+ u16 num_vf_qps; /* num queue pairs per VF */
+ u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
@@ -252,9 +301,11 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause vector index */
+ u32 sw_oicr_idx; /* Other interrupt cause SW vector index */
+ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
+ u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
@@ -270,6 +321,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+ u32 tx_timeout_count;
+ unsigned long tx_timeout_last_recovery;
+ u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
};
@@ -286,8 +340,8 @@ struct ice_netdev_priv {
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
- ((struct ice_pf *)hw->back)->oicr_idx;
+ u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ ((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index a0614f472658..6653555f55dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
@@ -443,6 +445,8 @@ struct ice_aqc_vsi_props {
u8 reserved[24];
};
+#define ICE_MAX_NUM_RECIPES 64
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -734,6 +738,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
+struct ice_aqc_get_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
struct ice_aqc_get_topo_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
struct ice_aqc_txsched_elem_data
@@ -771,9 +779,8 @@ struct ice_aqc_layer_props {
u8 chunk_size;
__le16 max_device_nodes;
__le16 max_pf_nodes;
- u8 rsvd0[2];
- __le16 max_shared_rate_lmtr;
- __le16 max_children;
+ u8 rsvd0[4];
+ __le16 max_sibl_grp_sz;
__le16 max_cir_rl_profiles;
__le16 max_eir_rl_profiles;
__le16 max_srl_profiles;
@@ -919,9 +926,11 @@ struct ice_aqc_set_phy_cfg_data {
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
-#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
-#define ICE_AQ_PHY_ENA_LINK BIT(3)
-#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define ICE_AQ_PHY_ENA_LESM BIT(6)
+#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
u8 low_power_ctrl;
__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
__le16 eeer_value;
@@ -1068,6 +1077,19 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/**
+ * Send to PF command (indirect 0x0801) id is only used by PF
+ *
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ *
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1203,6 +1225,84 @@ struct ice_aqc_dis_txq {
struct ice_aqc_dis_txq_item qgrps[1];
};
+/* Configure Firmware Logging Command (indirect 0xFF09)
+ * Logging Information Read Response (indirect 0xFF10)
+ * Note: The 0xFF10 command has no input parameters.
+ */
+struct ice_aqc_fw_logging {
+ u8 log_ctrl;
+#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
+#define ICE_AQC_FW_LOG_UART_EN BIT(1)
+ u8 rsvd0;
+ u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
+#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
+#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
+ u8 rsvd1[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_NETPROXY,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* This is the buffer for both of the logging commands.
+ * The entry array size depends on the datalen parameter in the descriptor.
+ * There will be a total of datalen / 2 entries.
+ */
+struct ice_aqc_fw_logging_data {
+ __le16 entry[1];
+#define ICE_AQC_FW_LOG_ID_S 0
+#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
+
+#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
+#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
+
+#define ICE_AQC_FW_LOG_EN_S 12
+#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
+#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
+#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
+#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
+#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
+};
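As a worked example of the sizing rule noted above (illustrative, not from the patch): each entry is a __le16, so a command that reconfigures three modules carries datalen = 6 bytes and the buffer holds 6 / 2 = 3 entries, each packing a module ID in bits 11:0 and its enable flags in bits 15:12.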
+
+/* Get/Clear FW Log (indirect 0xFF11) */
+struct ice_aqc_get_clear_fw_log {
+ u8 flags;
+#define ICE_AQC_FW_LOG_CLEAR BIT(0)
+#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
+ u8 rsvd1[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1247,11 +1347,15 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+ struct ice_aqc_fw_logging fw_logging;
+ struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
@@ -1325,6 +1429,7 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_delete_sched_elems = 0x040F,
@@ -1340,6 +1445,10 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
ice_aqc_opc_set_rss_lut = 0x0B03,
@@ -1349,6 +1458,9 @@ enum ice_adminq_opc {
/* TX queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+
+ /* debug commands */
+ ice_aqc_opc_fw_logging = 0xFF09,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 661beea6af79..8cd6a2401fd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -7,16 +7,16 @@
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
+#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
+ wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
((ICE_RX_OPC_MDID << \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
(((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
+#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
+ wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
(((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
(((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
@@ -43,6 +43,23 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_dev_onetime_setup - Temporary HW/FW workarounds
+ * @hw: pointer to the HW structure
+ *
+ * This function provides temporary workarounds for certain issues
+ * that are expected to be fixed in the HW/FW.
+ */
+void ice_dev_onetime_setup(struct ice_hw *hw)
+{
+ /* configure Rx - set non pxe mode */
+ wr32(hw, GLLAN_RCTL_0, 0x1);
+
+#define MBX_PF_VT_PFALLOC 0x00231E80
+ /* set VFs per PF */
+ wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -125,7 +142,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-static enum ice_status
+enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -218,7 +235,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -290,30 +307,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_parser - initialize rx flex parser
+ * ice_init_flex_flags
* @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
*
- * Function to initialize flex descriptors
+ * Function to initialize Rx flex flags
*/
-static void ice_init_flex_parser(struct ice_hw *hw)
+static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
u8 idx = 0;
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
- idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
+ * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
+ * flexiflags1[3:0] - Not used for flag programming
+ * flexiflags2[7:0] - Tunnel and VLAN types
+ * 2 invalid fields in last index
+ */
+ switch (prof_id) {
+ /* Rx flex flags are currently programmed for the NIC profiles only.
+ * Different flag bit programming configurations can be added per
+ * profile as needed.
+ */
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
+ ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_FIN, idx++);
+ /* flex flag 1 is not used for flexi-flag programming, skipping
+ * these four FLG64 bits.
+ */
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
+ ICE_RXFLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
+ ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
+ ICE_RXFLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Flag programming for profile ID %d not supported\n",
+ prof_id);
+ }
+}
+
+/**
+ * ice_init_flex_flds
+ * @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
+ *
+ * Function to initialize flex descriptors
+ */
+static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
+{
+ enum ice_flex_rx_mdid mdid;
+
+ switch (prof_id) {
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
+
+ mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
+ ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
+
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
+
+ ice_init_flex_flags(hw, prof_id);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Field init for profile ID %d not supported\n",
+ prof_id);
+ }
}
/**
@@ -333,20 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- mutex_init(&sw->mac_list_lock);
- INIT_LIST_HEAD(&sw->mac_list_head);
-
- mutex_init(&sw->vlan_list_lock);
- INIT_LIST_HEAD(&sw->vlan_list_head);
-
- mutex_init(&sw->eth_m_list_lock);
- INIT_LIST_HEAD(&sw->eth_m_list_head);
-
- mutex_init(&sw->promisc_list_lock);
- INIT_LIST_HEAD(&sw->promisc_list_head);
-
- mutex_init(&sw->mac_vlan_list_lock);
- INIT_LIST_HEAD(&sw->mac_vlan_list_head);
+ ice_init_def_sw_recp(hw);
return 0;
}
@@ -360,20 +419,232 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
struct ice_switch_info *sw = hw->switch_info;
struct ice_vsi_list_map_info *v_pos_map;
struct ice_vsi_list_map_info *v_tmp_map;
+ struct ice_sw_recipe *recps;
+ u8 i;
list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
list_entry) {
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
+ recps = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ recps[i].root_rid = i;
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ }
+ ice_rm_all_sw_replay_rule_info(hw);
+ devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
+ devm_kfree(ice_hw_to_dev(hw), sw);
+}
- mutex_destroy(&sw->mac_list_lock);
- mutex_destroy(&sw->vlan_list_lock);
- mutex_destroy(&sw->eth_m_list_lock);
- mutex_destroy(&sw->promisc_list_lock);
- mutex_destroy(&sw->mac_vlan_list_lock);
+#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
+ (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
+#define ICE_FW_LOG_DESC_SIZE_MAX \
+ ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
- devm_kfree(ice_hw_to_dev(hw), sw);
+/**
+ * ice_cfg_fw_log - configure FW logging
+ * @hw: pointer to the hw struct
+ * @enable: enable certain FW logging events if true, disable all if false
+ *
+ * This function enables/disables the FW logging via Rx CQ events and a UART
+ * port based on predetermined configurations. FW logging via the Rx CQ can be
+ * enabled/disabled for individual PF's. However, FW logging via the UART can
+ * only be enabled/disabled for all PFs on the same device.
+ *
+ * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
+ * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
+ * before initializing the device.
+ *
+ * When re/configuring FW logging, callers need to update the "cfg" elements of
+ * the hw->fw_log.evnts array with the desired logging event configurations for
+ * modules of interest. When disabling FW logging completely, the callers can
+ * just pass false in the "enable" parameter. On completion, the function will
+ * update the "cur" element of the hw->fw_log.evnts array with the resulting
+ * logging event configurations of the modules that are being re/configured. FW
+ * logging modules that are not part of a reconfiguration operation retain their
+ * previous states.
+ *
+ * Before resetting the device, it is recommended that the driver disables FW
+ * logging before shutting down the control queue. When disabling FW logging
+ * ("enable" = false), the latest configurations of FW logging events stored in
+ * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
+ * a device reset.
+ *
+ * When enabling FW logging to emit log messages via the Rx CQ during the
+ * device's initialization phase, a mechanism alternative to interrupt handlers
+ * needs to be used to extract FW log messages from the Rx CQ periodically and
+ * to prevent the Rx CQ from being full and stalling other types of control
+ * messages from FW to SW. Interrupts are typically disabled during the device's
+ * initialization phase.
+ */
+static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+{
+ struct ice_aqc_fw_logging_data *data = NULL;
+ struct ice_aqc_fw_logging *cmd;
+ enum ice_status status = 0;
+ u16 i, chgs = 0, len = 0;
+ struct ice_aq_desc desc;
+ u8 actv_evnts = 0;
+ void *buf = NULL;
+
+ if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
+ return 0;
+
+ /* Disable FW logging only when the control queue is still responsive */
+ if (!enable &&
+ (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
+ return 0;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
+ cmd = &desc.params.fw_logging;
+
+ /* Indicate which controls are valid */
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
+
+ if (enable) {
+ /* Fill in an array of entries with FW logging modules and
+ * logging events being reconfigured.
+ */
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ u16 val;
+
+ /* Keep track of enabled event types */
+ actv_evnts |= hw->fw_log.evnts[i].cfg;
+
+ if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
+ continue;
+
+ if (!data) {
+ data = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_FW_LOG_DESC_SIZE_MAX,
+ GFP_KERNEL);
+ if (!data)
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ val = i << ICE_AQC_FW_LOG_ID_S;
+ val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
+ data->entry[chgs++] = cpu_to_le16(val);
+ }
+
+ /* Only enable FW logging if at least one module is specified.
+ * If FW logging is currently enabled but all modules are not
+ * enabled to emit log messages, disable FW logging altogether.
+ */
+ if (actv_evnts) {
+ /* Leave if there is effectively no change */
+ if (!chgs)
+ goto out;
+
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
+
+ buf = data;
+ len = ICE_FW_LOG_DESC_SIZE(chgs);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
+ }
+
+ status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
+ if (!status) {
+ /* Update the current configuration to reflect events enabled.
+ * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
+ * logging mode is enabled for the device. They do not reflect
+ * actual modules being enabled to emit log messages. So, their
+ * values remain unchanged even when all modules are disabled.
+ */
+ u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
+
+ hw->fw_log.actv_evnts = actv_evnts;
+ for (i = 0; i < cnt; i++) {
+ u16 v, m;
+
+ if (!enable) {
+ /* When disabling all FW logging events as part
+ * of device's de-initialization, the original
+ * configurations are retained, and can be used
+ * to reconfigure FW logging later if the device
+ * is re-initialized.
+ */
+ hw->fw_log.evnts[i].cur = 0;
+ continue;
+ }
+
+ v = le16_to_cpu(data->entry[i]);
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
+ hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
+ }
+ }
+
+out:
+ if (data)
+ devm_kfree(ice_hw_to_dev(hw), data);
+
+ return status;
+}
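For illustration only (not part of this patch), a minimal caller-side sketch of the setup described above, assuming it runs before ice_init_hw() so that the ice_cfg_fw_log(hw, true) call made during init pushes the configuration to firmware:

hw->fw_log.cq_en = true;	/* emit FW log messages via the admin Rx queue */
hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg = ICE_AQC_FW_LOG_INFO_EN |
						ICE_AQC_FW_LOG_ERR_EN;
hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_ADMINQ].cfg = ICE_AQC_FW_LOG_ERR_EN;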
+
+/**
+ * ice_output_fw_log
+ * @hw: pointer to the hw struct
+ * @desc: pointer to the AQ message descriptor
+ * @buf: pointer to the buffer accompanying the AQ message
+ *
+ * Formats a FW Log message and outputs it via the standard driver logs.
+ */
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
+{
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
+ ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+ le16_to_cpu(desc->datalen));
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+}
+
+/**
+ * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * @hw: pointer to the hw struct
+ *
+ * Determines the itr/intrl granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+ switch (max_agg_bw) {
+ case ICE_MAX_AGG_BW_200G:
+ case ICE_MAX_AGG_BW_100G:
+ case ICE_MAX_AGG_BW_50G:
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+ break;
+ case ICE_MAX_AGG_BW_25G:
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to determine itr/intrl granularity\n");
+ return ICE_ERR_CFG;
+ }
+
+ return 0;
}
/**
@@ -400,16 +671,19 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- /* set these values to minimum allowed */
- hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
- hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
- hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
- hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+ status = ice_get_itr_intrl_gran(hw);
+ if (status)
+ return status;
status = ice_init_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
+ /* Enable FW logging. Not fatal if this fails. */
+ status = ice_cfg_fw_log(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+
status = ice_clear_pf_cfg(hw);
if (status)
goto err_unroll_cqinit;
@@ -472,10 +746,19 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
+ /* need a valid SW entry point to build a Tx tree */
+ if (!hw->sw_entry_point_layer) {
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
+ status = ICE_ERR_CFG;
+ goto err_unroll_sched;
+ }
+
status = ice_init_fltr_mgmt_struct(hw);
if (status)
goto err_unroll_sched;
+ ice_dev_onetime_setup(hw);
+
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -494,7 +777,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
- ice_init_flex_parser(hw);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
return 0;
@@ -515,15 +799,18 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_cleanup_fltr_mgmt_struct(hw);
+
ice_sched_cleanup_all(hw);
- ice_shutdown_all_ctrlq(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
hw->port_info = NULL;
}
- ice_cleanup_fltr_mgmt_struct(hw);
+ /* Attempt to disable FW logging before shutting down control queues */
+ ice_cfg_fw_log(hw, false);
+ ice_shutdown_all_ctrlq(hw);
}
/**
@@ -652,6 +939,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
val = GLGEN_RTRIG_GLOBR_M;
break;
+ default:
+ return ICE_ERR_PARAM;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -904,7 +1193,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* @timeout: the maximum time in ms that the driver may hold the resource
* @cd: pointer to command details structure or NULL
*
- * requests common resource using the admin queue commands (0x0008)
+ * Requests common resource using the admin queue commands (0x0008).
+ * When attempting to acquire the Global Config Lock, the driver can
+ * learn of three states:
+ * 1) ICE_SUCCESS - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
+ *
+ * Note that if the caller is in an acquire lock, perform action, release lock
+ * phase of operation, it is possible that the FW may detect a timeout and issue
+ * a CORER. In this case, the driver will receive a CORER interrupt and will
+ * have to determine its cause. The calling thread that is handling this flow
+ * will likely get an error propagated back to it indicating the Download
+ * Package, Update Package or the Release Resource AQ commands timed out.
*/
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
@@ -922,13 +1226,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
cmd_resp->res_id = cpu_to_le16(res);
cmd_resp->access_type = cpu_to_le16(access);
cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
/* The completion specifies the maximum time in ms that the driver
* may hold the resource in the Timeout field.
- * If the resource is held by someone else, the command completes with
- * busy return value and the timeout field indicates the maximum time
- * the current owner of the resource has to free it.
+ */
+
+ /* Global config lock response utilizes an additional status field.
+ *
+ * If the Global config lock resource is held by some other driver, the
+ * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * and the timeout field indicates the maximum time the current owner
+ * of the resource has to free it.
+ */
+ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
+ if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return 0;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_IN_PROG) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return ICE_ERR_AQ_ERROR;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_DONE) {
+ return ICE_ERR_AQ_NO_WORK;
+ }
+
+ /* invalid FW response, force a timeout immediately */
+ *timeout = 0;
+ return ICE_ERR_AQ_ERROR;
+ }
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
*/
if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
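For illustration only (not part of this patch), a sketch of the acquire/act/release pattern implied by the three Global Config Lock outcomes documented above; the access type and timeout value are placeholders:

status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, 3000);
if (!status) {
	/* lock owned: download the package, then release the resource */
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
} else if (status == ICE_ERR_AQ_NO_WORK) {
	/* another PF already downloaded the package; continue loading */
} else {
	/* lock not acquired; fail the driver load */
}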
@@ -967,30 +1301,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
* @hw: pointer to the HW structure
* @res: resource id
* @access: access type (read or write)
+ * @timeout: timeout in milliseconds
*
* This function will attempt to acquire the ownership of a resource.
*/
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access)
+ enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
+ u32 time_left = timeout;
enum ice_status status;
- u32 time_left = 0;
- u32 timeout;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* An admin queue return code of ICE_AQ_RC_EEXIST means that another
- * driver has previously acquired the resource and performed any
- * necessary updates; in this case the caller does not obtain the
- * resource and has no further work to do.
+ /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
- status = ICE_ERR_AQ_NO_WORK;
+ if (status == ICE_ERR_AQ_NO_WORK)
goto ice_acquire_res_exit;
- }
if (status)
ice_debug(hw, ICE_DBG_RES,
@@ -1003,11 +1335,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+ if (status == ICE_ERR_AQ_NO_WORK)
/* lock free, but no work to do */
- status = ICE_ERR_AQ_NO_WORK;
break;
- }
if (!status)
/* lock acquired */
@@ -1095,6 +1425,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs exposed = %d\n",
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs allocated = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VF base_id = %d\n",
+ func_p->vf_base_id);
+ }
+ break;
case ICE_AQC_CAPS_VSI:
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
@@ -1171,7 +1523,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
* @hw: pointer to the hw struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
- * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @cap_count: cap count needed if AQ err==ENOMEM
* @opc: capabilities type to discover - pass in the command opcode
* @cd: pointer to command details structure or NULL
*
@@ -1179,7 +1531,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
* the firmware.
*/
static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
@@ -1197,59 +1549,75 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status)
ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
- *data_size = le16_to_cpu(desc.datalen);
-
+ else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+ *cap_count = le32_to_cpu(cmd->count);
return status;
}
/**
- * ice_get_caps - get info about the HW
+ * ice_discover_caps - get info about the HW
* @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+static enum ice_status ice_discover_caps(struct ice_hw *hw,
+ enum ice_adminq_opc opc)
{
enum ice_status status;
- u16 data_size = 0;
+ u32 cap_count;
u16 cbuf_len;
u8 retries;
/* The driver doesn't know how many capabilities the device will return
* so the buffer size required isn't known ahead of time. The driver
* starts with cbuf_len and if this turns out to be insufficient, the
- * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
- * The driver then allocates the buffer of this size and retries the
- * operation. So it follows that the retry count is 2.
+ * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+ * The driver then allocates the buffer based on the count and retries
+ * the operation. So it follows that the retry count is 2.
*/
#define ICE_GET_CAP_BUF_COUNT 40
#define ICE_GET_CAP_RETRY_COUNT 2
- cbuf_len = ICE_GET_CAP_BUF_COUNT *
- sizeof(struct ice_aqc_list_caps_elem);
-
+ cap_count = ICE_GET_CAP_BUF_COUNT;
retries = ICE_GET_CAP_RETRY_COUNT;
do {
void *cbuf;
+ cbuf_len = (u16)(cap_count *
+ sizeof(struct ice_aqc_list_caps_elem));
cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
if (!cbuf)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
- ice_aqc_opc_list_func_caps, NULL);
+ status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+ opc, NULL);
devm_kfree(ice_hw_to_dev(hw), cbuf);
if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
break;
/* If ENOMEM is returned, try again with bigger buffer */
- cbuf_len = data_size;
} while (--retries);
return status;
}
/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+ if (!status)
+ status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+ return status;
+}
+
+/**
* ice_aq_manage_mac_write - manage MAC address write command
* @hw: pointer to the hw struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
@@ -1307,6 +1675,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_get_link_speed_based_on_phy_type - returns link speed
+ * @phy_type_low: lower part of phy_type
+ *
+ * This helper function will convert a phy_type_low to its corresponding link
+ * speed.
+ * Note: In the structure of phy_type_low, there should be one bit set, as
+ * this function will convert one phy type to its speed.
+ * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
+ * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
+ */
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+{
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ default:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed_phy_type_low;
+}
+
+/**
+ * ice_update_phy_type
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @link_speeds_bitmap: targeted link speeds bitmap
+ *
+ * Note: For the link_speeds_bitmap structure, see
+ * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
+ * link_speeds_bitmap that includes multiple speeds.
+ *
+ * The value of phy_type_low will present a certain link speed. This helper
+ * function will turn on bits in the phy_type_low based on the value of
+ * link_speeds_bitmap input parameter.
+ */
+void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
+{
+ u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u64 pt_low;
+ int index;
+
+ /* We first check with low part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
+ pt_low = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_low |= BIT_ULL(index);
+ }
+}
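For illustration only (not part of this patch), a short usage sketch of ice_update_phy_type() for a caller that wants to advertise only 10G and 25G:

u64 phy_type_low = 0;

ice_update_phy_type(&phy_type_low,
		    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
/* phy_type_low now has every matching ICE_PHY_TYPE_LOW_* bit set */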
+
+/**
* ice_aq_set_phy_cfg
* @hw: pointer to the hw struct
* @lport: logical port number
@@ -1318,19 +1790,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-static enum ice_status
+enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aqc_set_phy_cfg *cmd;
struct ice_aq_desc desc;
if (!cfg)
return ICE_ERR_PARAM;
- cmd = &desc.params.set_phy;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- cmd->lport_num = lport;
+ desc.params.set_phy.lport_num = lport;
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
@@ -1339,8 +1810,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-static enum ice_status
-ice_update_link_info(struct ice_port_info *pi)
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy_info;
@@ -1379,12 +1849,12 @@ out:
* ice_set_fc
* @pi: port information structure
* @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @atomic_restart: enable automatic link update
+ * @ena_auto_link_update: enable automatic link update
*
* Set the requested flow control mode.
*/
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
@@ -1434,8 +1904,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
int retry_count, retry_max = 10;
/* Auto restart link so settings take effect */
- if (atomic_restart)
- cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
+ if (ena_auto_link_update)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Copy over all the old settings */
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
@@ -1535,33 +2005,6 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
- * ice_aq_set_event_mask
- * @hw: pointer to the hw struct
- * @port_num: port number of the physical function
- * @mask: event mask to be set
- * @cd: pointer to command details structure or NULL
- *
- * Set event mask (0x0613)
- */
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.set_event_mask;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
-
- cmd->lport_num = port_num;
-
- cmd->event_mask = cpu_to_le16(mask);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -1654,7 +2097,7 @@ ice_aq_get_set_rss_lut_exit:
/**
* ice_aq_get_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -1662,17 +2105,20 @@ ice_aq_get_set_rss_lut_exit:
* get the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, false);
}
/**
* ice_aq_set_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -1680,11 +2126,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
* set the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, true);
}
/**
@@ -1725,31 +2174,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
* get the RSS key per VSI
*/
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ key, false);
}
/**
* ice_aq_set_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
* set the RSS key per VSI
*/
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ keys, true);
}
/**
@@ -1820,6 +2277,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* Disable LAN Tx queue (0x0C31)
@@ -1827,6 +2286,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txqs *cmd;
@@ -1836,14 +2296,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
- if (!qg_list)
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
return ICE_ERR_PARAM;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
cmd->num_entries = num_qgrps;
+ cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be absolute VF id */
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
for (i = 0; i < num_qgrps; ++i) {
/* Calculate the size taken up by the queue IDs in this group */
sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -1859,6 +2350,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sz)
return ICE_ERR_PARAM;
+do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
@@ -2088,7 +2580,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_ena_vsi_txq
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc: tc number
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
@@ -2098,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* This function adds one lan q
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -2115,15 +2607,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
hw = pi->hw;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
/* find a parent node */
- parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
status = ICE_ERR_PARAM;
goto ena_txq_exit;
}
+
buf->parent_teid = parent->info.node_teid;
node.parent_teid = parent->info.node_teid;
/* Mark that the values in the "generic" section as valid. The default
@@ -2161,13 +2657,16 @@ ena_txq_exit:
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cd)
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
@@ -2176,6 +2675,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ /* if queue is disabled already yet the disable queue command has to be
+ * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
+ * any queue information
+ */
+
+ if (!num_queues && rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+ NULL);
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -2188,7 +2696,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), cd);
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
if (status)
break;
@@ -2201,7 +2710,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
* @owner: lan or rdma
@@ -2209,7 +2718,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
* This function adds/updates the VSI queues per TC.
*/
static enum ice_status
-ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
enum ice_status status = 0;
@@ -2218,6 +2727,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
@@ -2225,7 +2737,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!ice_sched_get_tc_node(pi, i))
continue;
- status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
ice_is_tc_ena(tc_bitmap, i));
if (status)
break;
@@ -2238,16 +2750,140 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
/**
* ice_cfg_vsi_lan - configure VSI lan queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @max_lanqs: max lan queues array per TC
*
* This function adds/updates the VSI lan queues per TC.
*/
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
- return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
ICE_SCHED_NODE_OWNER_LAN);
}
+
+/**
+ * ice_replay_pre_init - replay pre initialization
+ * @hw: pointer to the hw struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ /* Delete old entries from replay filter list head if there is any */
+ ice_rm_all_sw_replay_rule_info(hw);
+ /* At the start of replay, move the entries into the replay_rules list;
+ * this allows adding rule entries back to the filt_rules list, which is
+ * the operational list.
+ */
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ list_replace_init(&sw->recp_list[i].filt_rules,
+ &sw->recp_list[i].filt_replay_rules);
+
+ return 0;
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the hw struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after reset. This function must be called
+ * with the main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Replay pre-initialization if there is any */
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+ status = ice_replay_pre_init(hw);
+ if (status)
+ return status;
+ }
+
+ /* Replay all filters for this VSI */
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the hw struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+ /* Delete old entries from replay filter list head */
+ ice_rm_all_sw_replay_rule_info(hw);
+}
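
The replay helpers above are intended to be called in a fixed order after a reset: the main VSI first (which triggers ice_replay_pre_init()), then every other VSI, then ice_replay_post() to drop the saved rules. A minimal sketch of that order, assuming pf->vsi[] holds the allocated VSIs and vsi->idx is the software VSI handle (the loop below is illustrative, not code from this patch):

/* Hedged sketch of the post-reset replay order; the pf->vsi[] walk and
 * the use of vsi->idx as the software VSI handle are assumptions made
 * for illustration only.
 */
static void example_replay_all_vsi(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int i;

	/* the main VSI must be replayed first so that
	 * ice_replay_pre_init() runs exactly once
	 */
	if (ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE))
		return;

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i] || pf->vsi[i]->idx == ICE_MAIN_VSI_HANDLE)
			continue;
		ice_replay_vsi(hw, pf->vsi[i]->idx);
	}

	/* drop the saved replay rules once all filters are restored */
	ice_replay_post(hw);
}
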
+
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @hireg: high 32 bit HW register to read from
+ * @loreg: low 32 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+{
+ u64 new_data;
+
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+
+ /* device stats are not reset at PFR, so they likely will not be zeroed
+ * when the driver starts. Save the first values read and use them as
+ * offsets to be subtracted from the raw values in order to report stats
+ * that count from zero.
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (new_data >= *prev_stat)
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
+ *cur_stat &= 0xFFFFFFFFFFULL;
+}
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+
+ /* device stats are not reset at PFR, so they likely will not be zeroed
+ * when the driver starts. Save the first values read and use them as
+ * offsets to be subtracted from the raw values in order to report stats
+ * that count from zero.
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (new_data >= *prev_stat)
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+}
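
Both stat helpers report counters relative to the first value read and absorb a wrap of the narrow hardware counter. A standalone sketch of the same arithmetic for the 40-bit case (plain C, outside the driver):

#include <stdint.h>

/* Standalone restatement of the ice_stat_update40() arithmetic: report
 * the counter relative to a baseline captured on the first read, and
 * add 2^40 before subtracting when the 40-bit hardware counter wrapped.
 */
static uint64_t stat_delta40(uint64_t new_data, uint64_t *prev_stat,
			     int prev_stat_loaded)
{
	uint64_t cur_stat;

	if (!prev_stat_loaded)
		*prev_stat = new_data;

	if (new_data >= *prev_stat)
		cur_stat = new_data - *prev_stat;
	else
		cur_stat = (new_data + (1ULL << 40)) - *prev_stat;

	return cur_stat & 0xFFFFFFFFFFULL; /* keep the result in 40 bits */
}
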
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9a5519130af1..cf760c24a6aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
@@ -21,9 +22,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access);
+ enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
@@ -32,22 +34,26 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
+
+void ice_dev_onetime_setup(struct ice_hw *hw);
+
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
+
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -58,29 +64,43 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_sq_cd *cd);
+void
+ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);
+ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
+ bool ena_auto_link_update);
+
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
-ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
- struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd);
-enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cmd_details);
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cmd_details);
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 62be72fdc8f3..84c967294eaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
}
/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ /* set head and tail registers in our local struct */
+ cq->sq.head = PF_MBX_ATQH;
+ cq->sq.tail = PF_MBX_ATQT;
+ cq->sq.len = PF_MBX_ATQLEN;
+ cq->sq.bah = PF_MBX_ATQBAH;
+ cq->sq.bal = PF_MBX_ATQBAL;
+ cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+ cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+ cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+ cq->rq.head = PF_MBX_ARQH;
+ cq->rq.tail = PF_MBX_ARQT;
+ cq->rq.len = PF_MBX_ARQLEN;
+ cq->rq.bah = PF_MBX_ARQBAH;
+ cq->rq.bal = PF_MBX_ARQBAL;
+ cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+ cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+ cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
* @cq: pointer to the specific Control queue
@@ -518,22 +548,31 @@ shutdown_sq_out:
/**
* ice_aq_ver_check - Check the reported AQ API version.
- * @fw_branch: The "branch" of FW, typically describes the device type
- * @fw_major: The major version of the FW API
- * @fw_minor: The minor version increment of the FW API
+ * @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
*
* Return: 'true' iff the driver should attempt to load. 'false' otherwise.
*/
-static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
+static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (fw_branch != EXP_FW_API_VER_BRANCH)
- return false;
- if (fw_major != EXP_FW_API_VER_MAJOR)
- return false;
- if (fw_minor != EXP_FW_API_VER_MINOR)
+ if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ /* Major API version is newer than expected, don't load */
+ dev_warn(ice_hw_to_dev(hw),
+ "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
+ } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+ if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ } else {
+ /* Major API version is older than expected, log a warning */
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ }
return true;
}
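
The rewritten check only refuses to load when the firmware reports a newer major API version than the driver was built against; an older major, or a minor-version skew of more than two, is only logged. A standalone sketch of that policy, reusing the EXP_FW_API_VER_* values updated later in this patch:

#include <stdbool.h>
#include <stdio.h>

#define EXP_FW_API_VER_MAJOR 0x01
#define EXP_FW_API_VER_MINOR 0x03

/* Sketch of the ice_aq_ver_check() policy: only a newer major API
 * version blocks the load; everything else is merely reported.
 */
static bool aq_ver_ok(unsigned int api_maj_ver, unsigned int api_min_ver)
{
	if (api_maj_ver > EXP_FW_API_VER_MAJOR) {
		printf("NVM image is newer than expected - not loading\n");
		return false;
	}

	if (api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (api_min_ver > EXP_FW_API_VER_MINOR + 2)
			printf("NVM image is newer than expected\n");
		else if (api_min_ver + 2 < EXP_FW_API_VER_MINOR)
			printf("NVM image is older than expected\n");
	} else {
		printf("NVM image is older than expected\n");
	}

	return true;
}
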
@@ -588,8 +627,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
if (status)
goto init_ctrlq_free_rq;
- if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
- hw->api_min_ver)) {
+ if (!ice_aq_ver_check(hw)) {
status = ICE_ERR_FW_API_VER;
goto init_ctrlq_free_rq;
}
@@ -597,11 +635,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- if (cq->rq.head) {
+ if (cq->rq.count) {
ice_shutdown_rq(hw, cq);
mutex_destroy(&cq->rq_lock);
}
- if (cq->sq.head) {
+ if (cq->sq.count) {
ice_shutdown_sq(hw, cq);
mutex_destroy(&cq->sq_lock);
}
@@ -631,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
default:
return ICE_ERR_PARAM;
}
@@ -688,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (ret_code)
return ret_code;
- return ice_init_check_adminq(hw);
+ ret_code = ice_init_check_adminq(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -706,15 +753,18 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
default:
return;
}
- if (cq->sq.head) {
+ if (cq->sq.count) {
ice_shutdown_sq(hw, cq);
mutex_destroy(&cq->sq_lock);
}
- if (cq->rq.head) {
+ if (cq->rq.count) {
ice_shutdown_rq(hw, cq);
mutex_destroy(&cq->rq_lock);
}
@@ -728,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -806,6 +858,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
u16 retval = 0;
u32 val = 0;
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
@@ -847,7 +902,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
if (cd)
- memcpy(details, cd, sizeof(*details));
+ *details = *cd;
else
memset(details, 0, sizeof(*details));
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ea02b89243e2..0038a4109c99 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -8,6 +8,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -18,16 +19,16 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
- *
*/
#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x00
-#define EXP_FW_API_VER_MINOR 0x01
+#define EXP_FW_API_VER_MAJOR 0x01
+#define EXP_FW_API_VER_MINOR 0x03
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 0e14d7215a6e..f8d5c661d0ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,15 +5,11 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
-/* Intel(R) Ethernet Controller C810 for backplane */
-#define ICE_DEV_ID_C810_BACKPLANE 0x1591
-/* Intel(R) Ethernet Controller C810 for QSFP */
-#define ICE_DEV_ID_C810_QSFP 0x1592
-/* Intel(R) Ethernet Controller C810 for SFP */
-#define ICE_DEV_ID_C810_SFP 0x1593
-/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
-#define ICE_DEV_ID_C810_10G_BASE_T 0x1594
-/* Intel(R) Ethernet Controller C810 1GbE */
-#define ICE_DEV_ID_C810_SGMII 0x1595
+/* Intel(R) Ethernet Controller E810-C for backplane */
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller E810-C for QSFP */
+#define ICE_DEV_ID_E810C_QSFP 0x1592
+/* Intel(R) Ethernet Controller E810-C for SFP */
+#define ICE_DEV_ID_E810C_SFP 0x1593
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c71a9b528d6d..96923580f2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
-static int
-ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+/**
+ * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @netdev: network interface device structure
+ * @ks: ethtool link ksettings struct to fill out
+ */
+static void ice_phy_type_to_ethtool(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- bool link_up;
+ u64 phy_types_low;
hw_link_info = &vsi->port_info->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ phy_types_low = vsi->port_info->phy.phy_type_low;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
+ /* Autoneg PHY types */
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+}
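
The function above is one long if-chain over the phy_type_low capability bits: each arm marks a supported link mode and, when the matching ICE_AQ_LINK_SPEED_* bit is set in req_speeds, the advertised mode as well. The same walk can be expressed table-driven; a hedged sketch with placeholder masks, mode indices, and 64-bit link-mode bitmaps (the real values are the ICE_PHY_TYPE_LOW_* and ETHTOOL_LINK_MODE_*_BIT constants):

#include <stddef.h>
#include <stdint.h>

/* Table-driven sketch of the ice_phy_type_to_ethtool() mapping; the
 * mask values, mode indices, and uint64_t bitmaps are simplifications
 * for illustration only.
 */
struct phy_mode_map {
	uint64_t phy_mask;	/* one or more ICE_PHY_TYPE_LOW_* bits */
	unsigned int mode_bit;	/* an ETHTOOL_LINK_MODE_*_BIT index */
	uint16_t aq_speed;	/* the matching ICE_AQ_LINK_SPEED_* bit */
};

static void map_phy_types(uint64_t phy_types_low, uint16_t req_speeds,
			  const struct phy_mode_map *tbl, size_t n,
			  uint64_t *supported, uint64_t *advertising)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!(phy_types_low & tbl[i].phy_mask))
			continue;
		*supported |= 1ULL << tbl[i].mode_bit;
		if (req_speeds & tbl[i].aq_speed)
			*advertising |= 1ULL << tbl[i].mode_bit;
	}
}
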
- /* set speed and duplex */
- if (link_up) {
- switch (hw_link_info->link_speed) {
- case ICE_AQ_LINK_SPEED_100MB:
- ks->base.speed = SPEED_100;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- ks->base.speed = SPEED_2500;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- ks->base.speed = SPEED_5000;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- ks->base.speed = SPEED_10000;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- ks->base.speed = SPEED_25000;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- ks->base.speed = SPEED_40000;
- break;
- default:
- ks->base.speed = SPEED_UNKNOWN;
- break;
- }
+#define TEST_SET_BITS_TIMEOUT 50
+#define TEST_SET_BITS_SLEEP_MAX 2000
+#define TEST_SET_BITS_SLEEP_MIN 1000
- ks->base.duplex = DUPLEX_FULL;
- } else {
- ks->base.speed = SPEED_UNKNOWN;
- ks->base.duplex = DUPLEX_UNKNOWN;
+/**
+ * ice_get_settings_link_up - Get Link settings for when link is up
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ */
+static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings cap_ksettings;
+ struct ice_link_status *link_info;
+ struct ice_vsi *vsi = np->vsi;
+ bool unrecog_phy_low = false;
+
+ link_info = &vsi->port_info->phy.link_info;
+
+ /* Initialize supported and advertised settings based on phy settings */
+ switch (link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ break;
+ default:
+ unrecog_phy_low = true;
+ }
+
+ if (unrecog_phy_low) {
+ /* if we got here and link is up something bad is afoot */
+ netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
+ (u64)link_info->phy_type_low);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, get what is supported by the NVM and intersect
+ * them to get what is truly supported
+ */
+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ ice_phy_type_to_ethtool(netdev, &cap_ksettings);
+ ethtool_intersect_link_masks(ks, &cap_ksettings);
+
+ switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_40GB:
+ ks->base.speed = SPEED_40000;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ ks->base.speed = SPEED_25000;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ ks->base.speed = SPEED_20000;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ ks->base.speed = SPEED_10000;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ ks->base.speed = SPEED_2500;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ ks->base.speed = SPEED_1000;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ ks->base.speed = SPEED_100;
+ break;
+ default:
+ netdev_info(netdev,
+ "WARNING: Unrecognized link_speed (0x%x).\n",
+ link_info->link_speed);
+ break;
+ }
+ ks->base.duplex = DUPLEX_FULL;
+}
+
+/**
+ * ice_get_settings_link_down - Get the Link settings when link is down
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ *
+ * Reports link settings that can be determined when link is down
+ */
+static void
+ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
+ struct net_device __always_unused *netdev)
+{
+ /* link is down and the driver needs to fall back on
+ * supported phy types to figure out what info to display
+ */
+ ice_phy_type_to_ethtool(netdev, ks);
+
+ /* With no link, speed and duplex are unknown */
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * ice_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Reports speed/duplex settings based on media_type
+ */
+static int ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_vsi *vsi = np->vsi;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ hw_link_info = &vsi->port_info->phy.link_info;
+
+ /* set speed and duplex */
+ if (hw_link_info->link_info & ICE_AQ_LINK_UP)
+ ice_get_settings_link_up(ks, netdev);
+ else
+ ice_get_settings_link_down(ks, netdev);
+
/* set autoneg settings */
- ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
/* set media type settings */
switch (vsi->port_info->phy.media_type) {
@@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev,
}
/**
+ * ice_ksettings_find_adv_link_speed - Find advertising link speed
+ * @ks: ethtool ksettings
+ */
+static u16
+ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
+{
+ u16 adv_link_speed = 0;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseLR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
+
+ return adv_link_speed;
+}
+
+/**
+ * ice_setup_autoneg
+ * @p: port info
+ * @ks: ethtool_link_ksettings
+ * @config: configuration that will be sent down to FW
+ * @autoneg_enabled: whether autonegotiation is enabled
+ * @autoneg_changed: set to 1 if autonegotiation will be changed
+ * @netdev: network interface device structure
+ *
+ * Setup PHY autonegotiation feature
+ */
+static int
+ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
+ struct ice_aqc_set_phy_cfg_data *config,
+ u8 autoneg_enabled, u8 *autoneg_changed,
+ struct net_device *netdev)
+{
+ int err = 0;
+
+ *autoneg_changed = 0;
+
+ /* Check autoneg */
+ if (autoneg_enabled == AUTONEG_ENABLE) {
+ /* If autoneg was not already enabled */
+ if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy.\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ } else {
+ /* If autoneg is currently enabled */
+ if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
+ /* If autoneg is supported, 10GBASE_T is the only PHY that can
+ * disable it, so otherwise return an error
+ */
+ if (ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Set speed/duplex per media_types advertised/forced
+ */
+static int ice_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings safe_ks, copy_ks;
+ struct ice_aqc_get_phy_caps_data *abilities;
+ u16 adv_link_speed, curr_link_speed, idx;
+ struct ice_aqc_set_phy_cfg_data config;
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_port_info *p;
+ u8 autoneg_changed = 0;
+ enum ice_status status;
+ u64 phy_type_low;
+ int err = 0;
+ bool linkup;
+
+ p = np->vsi->port_info;
+
+ if (!p)
+ return -EOPNOTSUPP;
+
+ /* Check if this is the LAN VSI */
+ for (idx = 0; idx < pf->num_alloc_vsi; idx++) {
+ if (pf->vsi[idx]->type == ICE_VSI_PF) {
+ if (np->vsi != pf->vsi[idx])
+ return -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ if (p->phy.media_type != ICE_MEDIA_BASET &&
+ p->phy.media_type != ICE_MEDIA_FIBER &&
+ p->phy.media_type != ICE_MEDIA_BACKPLANE &&
+ p->phy.media_type != ICE_MEDIA_DA &&
+ p->phy.link_info.link_info & ICE_AQ_LINK_UP)
+ return -EOPNOTSUPP;
+
+ /* copy the ksettings to copy_ks to avoid modifying the original */
+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+
+ /* save autoneg out of ksettings */
+ autoneg = copy_ks.base.autoneg;
+
+ memset(&safe_ks, 0, sizeof(safe_ks));
+
+ /* Get link modes supported by hardware. */
+ ice_phy_type_to_ethtool(netdev, &safe_ks);
+
+ /* and check against modes requested by user.
+ * Return an error if unsupported mode was set.
+ */
+ if (!bitmap_subset(copy_ks.link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ /* get our own copy of the bits to check against */
+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ safe_ks.base.cmd = copy_ks.base.cmd;
+ safe_ks.base.link_mode_masks_nwords =
+ copy_ks.base.link_mode_masks_nwords;
+ ice_get_link_ksettings(netdev, &safe_ks);
+
+ /* set autoneg back to what it currently is */
+ copy_ks.base.autoneg = safe_ks.base.autoneg;
+ /* we don't compare the speed */
+ copy_ks.base.speed = safe_ks.base.speed;
+
+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
+ * trying to set something that we do not support.
+ */
+ if (memcmp(&copy_ks.base, &safe_ks.base,
+ sizeof(struct ethtool_link_settings)))
+ return -EOPNOTSUPP;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
+ }
+
+ abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
+ GFP_KERNEL);
+ if (!abilities)
+ return -ENOMEM;
+
+ /* Get the current phy config */
+ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy abilities to config in case autoneg is not set below */
+ memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data));
+ config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
+ if (abilities->caps & ICE_AQC_PHY_AN_MODE)
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ /* Check autoneg */
+ err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
+ netdev);
+
+ if (err)
+ goto done;
+
+ /* Call to get the current link speed */
+ p->phy.get_link_info = true;
+ status = ice_get_link_status(p, &linkup);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ curr_link_speed = p->phy.link_info.link_speed;
+ adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
+
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!adv_link_speed)
+ adv_link_speed = curr_link_speed;
+
+ /* Convert the advertised link speeds to their corresponding PHY_TYPE */
+ ice_update_phy_type(&phy_type_low, adv_link_speed);
+
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+ goto done;
+ }
+
+ /* copy over the rest of the abilities */
+ config.low_power_ctrl = abilities->low_power_ctrl;
+ config.eee_cap = abilities->eee_cap;
+ config.eeer_value = abilities->eeer_value;
+ config.link_fec_opt = abilities->link_fec_options;
+
+ /* save the requested speeds */
+ p->phy.link_info.req_speeds = adv_link_speed;
+
+ /* set link and auto negotiation so changes take effect */
+ config.caps |= ICE_AQ_PHY_ENA_LINK;
+
+ if (phy_type_low) {
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ abilities->phy_type_low;
+ } else {
+ err = -EAGAIN;
+ netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n");
+ goto done;
+ }
+
+ /* If link is up put link down */
+ if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
+ /* Tell the OS link is going down, the link will go
+ * back up when fw says it is ready asynchronously
+ */
+ ice_print_link_msg(np->vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ /* make the aq call */
+ status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+ if (status) {
+ netdev_info(netdev, "Set phy config failed,\n");
+ err = -EAGAIN;
+ }
+
+done:
+ devm_kfree(&pf->pdev->dev, abilities);
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+
+ return err;
+}
+
+/**
* ice_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -478,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring->tx_max_pending = ICE_MAX_NUM_DESC;
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
- ring->rx_mini_pending = ICE_MIN_NUM_DESC;
+
+ /* Rx mini and jumbo rings are not supported */
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
+ ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -498,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring->tx_pending < ICE_MIN_NUM_DESC ||
ring->rx_pending > ICE_MAX_NUM_DESC ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
- netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
+ if (new_tx_cnt != ring->tx_pending)
+ netdev_info(netdev,
+ "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_cnt);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
+ if (new_rx_cnt != ring->rx_pending)
+ netdev_info(netdev,
+ "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_cnt);
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
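
Requested descriptor counts are rounded up to a multiple of ICE_REQ_DESC_MULTIPLE (32) with the kernel's ALIGN() macro before the rings are resized. A standalone example of that rounding, assuming a power-of-two multiple:

#include <stdio.h>

#define ICE_REQ_DESC_MULTIPLE	32

/* userspace equivalent of the kernel ALIGN() used above; only valid
 * for power-of-two alignments such as ICE_REQ_DESC_MULTIPLE
 */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("%d\n", ALIGN_UP(200, ICE_REQ_DESC_MULTIPLE));	/* prints 224 */
	printf("%d\n", ALIGN_UP(512, ICE_REQ_DESC_MULTIPLE));	/* prints 512 */
	return 0;
}
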
@@ -933,6 +1664,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
+ .set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6076fc87df9d..5fdea6ec7675 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,251 +6,331 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
-#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
-#define PF_FW_ARQBAH 0x00080180
-#define PF_FW_ARQBAL 0x00080080
-#define PF_FW_ARQH 0x00080380
-#define PF_FW_ARQH_ARQH_S 0
-#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S)
-#define PF_FW_ARQLEN 0x00080280
-#define PF_FW_ARQLEN_ARQLEN_S 0
-#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
-#define PF_FW_ARQLEN_ARQVFE_S 28
-#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
-#define PF_FW_ARQLEN_ARQOVFL_S 29
-#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
-#define PF_FW_ARQLEN_ARQCRIT_S 30
-#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
-#define PF_FW_ARQLEN_ARQENABLE_S 31
-#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
-#define PF_FW_ARQT 0x00080480
-#define PF_FW_ATQBAH 0x00080100
-#define PF_FW_ATQBAL 0x00080000
-#define PF_FW_ATQH 0x00080300
-#define PF_FW_ATQH_ATQH_S 0
-#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S)
-#define PF_FW_ATQLEN 0x00080200
-#define PF_FW_ATQLEN_ATQLEN_S 0
-#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
-#define PF_FW_ATQLEN_ATQVFE_S 28
-#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
-#define PF_FW_ATQLEN_ATQOVFL_S 29
-#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
-#define PF_FW_ATQLEN_ATQCRIT_S 30
-#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
-#define PF_FW_ATQLEN_ATQENABLE_S 31
-#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
-#define PF_FW_ATQT 0x00080400
-
+#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define PF_FW_ARQBAH 0x00080180
+#define PF_FW_ARQBAL 0x00080080
+#define PF_FW_ARQH 0x00080380
+#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN 0x00080280
+#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN_ARQVFE_M BIT(28)
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_FW_ARQT 0x00080480
+#define PF_FW_ATQBAH 0x00080100
+#define PF_FW_ATQBAL 0x00080000
+#define PF_FW_ATQH 0x00080300
+#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN 0x00080200
+#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_FW_ATQT 0x00080400
+#define PF_MBX_ARQBAH 0x0022E400
+#define PF_MBX_ARQBAL 0x0022E380
+#define PF_MBX_ARQH 0x0022E500
+#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN 0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_MBX_ARQT 0x0022E580
+#define PF_MBX_ATQBAH 0x0022E180
+#define PF_MBX_ATQBAL 0x0022E100
+#define PF_MBX_ATQH 0x0022E280
+#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN 0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_MBX_ATQT 0x0022E300
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
-
-#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
-#define QRXFLXP_CNTXT_RXDID_IDX_S 0
-#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
-#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
-#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
-#define QRXFLXP_CNTXT_TS_S 11
-#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
-#define GLGEN_RSTAT 0x000B8188
-#define GLGEN_RSTAT_DEVSTATE_S 0
-#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
-#define GLGEN_RSTCTL 0x000B8180
-#define GLGEN_RSTCTL_GRSTDEL_S 0
-#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
-#define GLGEN_RSTAT_RESET_TYPE_S 2
-#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)
-#define GLGEN_RTRIG 0x000B8190
-#define GLGEN_RTRIG_CORER_S 0
-#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
-#define GLGEN_RTRIG_GLOBR_S 1
-#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
-#define GLGEN_STAT 0x000B612C
-#define PFGEN_CTRL 0x00091000
-#define PFGEN_CTRL_PFSWR_S 0
-#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
-#define PFGEN_STATE 0x00088000
-#define PRTGEN_STATUS 0x000B8100
-#define PFHMC_ERRORDATA 0x00520500
-#define PFHMC_ERRORINFO 0x00520400
-#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
-#define GLINT_DYN_CTL_INTENA_S 0
-#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
-#define GLINT_DYN_CTL_CLEARPBA_S 1
-#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
-#define GLINT_DYN_CTL_SWINT_TRIG_S 2
-#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
-#define GLINT_DYN_CTL_ITR_INDX_S 3
-#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
-#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
-#define GLINT_DYN_CTL_INTENA_MSK_S 31
-#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
-#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
-#define PFINT_FW_CTL 0x0016C800
-#define PFINT_FW_CTL_MSIX_INDX_S 0
-#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
-#define PFINT_FW_CTL_ITR_INDX_S 11
-#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
-#define PFINT_FW_CTL_CAUSE_ENA_S 30
-#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
-#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_ECC_ERR_S 16
-#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
-#define PFINT_OICR_MAL_DETECT_S 19
-#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
-#define PFINT_OICR_GRST_S 20
-#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
-#define PFINT_OICR_PCI_EXCEPTION_S 21
-#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_HMC_ERR_S 26
-#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
-#define PFINT_OICR_PE_CRITERR_S 28
-#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
-#define PFINT_OICR_CTL 0x0016CA80
-#define PFINT_OICR_CTL_MSIX_INDX_S 0
-#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
-#define PFINT_OICR_CTL_ITR_INDX_S 11
-#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
-#define PFINT_OICR_CTL_CAUSE_ENA_S 30
-#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
-#define PFINT_OICR_ENA 0x0016C900
-#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
-#define QINT_RQCTL_MSIX_INDX_S 0
-#define QINT_RQCTL_ITR_INDX_S 11
-#define QINT_RQCTL_CAUSE_ENA_S 30
-#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
-#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
-#define QINT_TQCTL_MSIX_INDX_S 0
-#define QINT_TQCTL_ITR_INDX_S 11
-#define QINT_TQCTL_CAUSE_ENA_S 30
-#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
-#define GLLAN_RCTL_0 0x002941F8
-#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
-#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
-#define QRX_CTRL_MAX_INDEX 2047
-#define QRX_CTRL_QENA_REQ_S 0
-#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
-#define QRX_CTRL_QENA_STAT_S 2
-#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
-#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
-#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
-#define GLNVM_FLA 0x000B6108
-#define GLNVM_FLA_LOCKED_S 6
-#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
-#define GLNVM_GENS 0x000B6100
-#define GLNVM_GENS_SR_SIZE_S 5
-#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
-#define GLNVM_ULD 0x000B6008
-#define GLNVM_ULD_CORER_DONE_S 3
-#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
-#define GLNVM_ULD_GLOBR_DONE_S 4
-#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
-#define PF_FUNC_RID 0x0009E880
-#define PF_FUNC_RID_FUNC_NUM_S 0
-#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
-#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
-#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
-#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
-#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
-#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
-#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
-#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
-#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
-#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
-#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
-#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
-#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
-#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
-#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
-#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
-#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
-#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
-#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
-#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
-#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
-#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
-#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
-#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
-#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
-#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
-#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
-#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
-#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
-#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
-#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
-#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
-#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
-#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
-#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
-#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
-#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
-#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
-#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
-#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
-#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
-#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
-#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
-#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
-#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
-#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
-#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define VSIQF_HKEY_MAX_INDEX 12
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30)
+#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
+#define QRXFLXP_CNTXT_RXDID_IDX_S 0
+#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
+#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
+#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define GLGEN_RSTAT 0x000B8188
+#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
+#define GLGEN_RSTCTL 0x000B8180
+#define GLGEN_RSTCTL_GRSTDEL_S 0
+#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
+#define GLGEN_RSTAT_RESET_TYPE_S 2
+#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2)
+#define GLGEN_RTRIG 0x000B8190
+#define GLGEN_RTRIG_CORER_M BIT(0)
+#define GLGEN_RTRIG_GLOBR_M BIT(1)
+#define GLGEN_STAT 0x000B612C
+#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
+#define PFGEN_CTRL 0x00091000
+#define PFGEN_CTRL_PFSWR_M BIT(0)
+#define PFGEN_STATE 0x00088000
+#define PRTGEN_STATUS 0x000B8100
+#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M BIT(0)
+#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
+#define PFHMC_ERRORDATA 0x00520500
+#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
+#define GLINT_DYN_CTL_INTENA_M BIT(0)
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
+#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
+#define GLINT_RATE_INTRL_ENA_M BIT(6)
+#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S 0
+#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S 12
+#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S 16
+#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PFINT_FW_CTL 0x0016C800
+#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_MBX_CTL 0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S 11
+#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_ECC_ERR_M BIT(16)
+#define PFINT_OICR_MAL_DETECT_M BIT(19)
+#define PFINT_OICR_GRST_M BIT(20)
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
+#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_VFLR_M BIT(29)
+#define PFINT_OICR_CTL 0x0016CA80
+#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR_ENA 0x0016C900
+#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
+#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
+#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
+#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S 0
+#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S 12
+#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M BIT(31)
+#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4))
+#define VPINT_ALLOC_PCI_FIRST_S 0
+#define VPINT_ALLOC_PCI_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_PCI_LAST_S 12
+#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define GLLAN_RCTL_0 0x002941F8
+#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
+#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
+#define QRX_CTRL_MAX_INDEX 2047
+#define QRX_CTRL_QENA_REQ_S 0
+#define QRX_CTRL_QENA_REQ_M BIT(0)
+#define QRX_CTRL_QENA_STAT_S 2
+#define QRX_CTRL_QENA_STAT_M BIT(2)
+#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
+#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
+#define QRX_TAIL_MAX_INDEX 2047
+#define QRX_TAIL_TAIL_S 0
+#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S 16
+#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
+#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S 16
+#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define GL_MDET_RX 0x00294C00
+#define GL_MDET_RX_QNUM_S 0
+#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_RX_VF_NUM_S 15
+#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_RX_PF_NUM_S 23
+#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_RX_MAL_TYPE_S 26
+#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_RX_VALID_M BIT(31)
+#define GL_MDET_TX_PQM 0x002D2E00
+#define GL_MDET_TX_PQM_PF_NUM_S 0
+#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0)
+#define GL_MDET_TX_PQM_VF_NUM_S 4
+#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4)
+#define GL_MDET_TX_PQM_QNUM_S 12
+#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12)
+#define GL_MDET_TX_PQM_MAL_TYPE_S 26
+#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_PQM_VALID_M BIT(31)
+#define GL_MDET_TX_TCLAN 0x000FC068
+#define GL_MDET_TX_TCLAN_QNUM_S 0
+#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_TX_TCLAN_VF_NUM_S 15
+#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_TX_TCLAN_PF_NUM_S 23
+#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26
+#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_TCLAN_VALID_M BIT(31)
+#define PF_MDET_RX 0x00294280
+#define PF_MDET_RX_VALID_M BIT(0)
+#define PF_MDET_TX_PQM 0x002D2C80
+#define PF_MDET_TX_PQM_VALID_M BIT(0)
+#define PF_MDET_TX_TCLAN 0x000FC000
+#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M BIT(0)
+#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M BIT(0)
+#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define GLNVM_FLA 0x000B6108
+#define GLNVM_FLA_LOCKED_M BIT(6)
+#define GLNVM_GENS 0x000B6100
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
+#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_CORER_DONE_M BIT(3)
+#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define PF_FUNC_RID 0x0009E880
+#define PF_FUNC_RID_FUNC_NUM_S 0
+#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
+#define PF_PCI_CIAA 0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S 12
+#define PF_PCI_CIAD 0x0009E500
+#define GL_PWR_MODE_CTL 0x000B820C
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
+#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
+#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
+#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
+#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
+#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
+#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
+#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
+#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
+#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
+#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
+#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
+#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
+#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
+#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
+#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
+#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
+#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
+#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
+#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
+#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
+#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
+#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
+#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
+#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
+#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
+#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
+#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
+#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
+#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
+#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
+#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
+#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
+#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
+#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
+#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
+#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
+#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
+#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
+#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
+#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
+#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
+#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
+#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
+#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
+#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
+#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
+#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
+#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
+#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
+#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
+#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
+#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
+#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
+#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
+#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
+#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
+#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PF_VT_PFALLOC_HIF 0x0009DD80
+#define VSIQF_HKEY_MAX_INDEX 12
+#define VSIQF_HLUT_MAX_INDEX 15
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 068dbc740b76..7d2a66739e3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic {
* with a specific metadata (profile 7 reserved for HW)
*/
enum ice_rxdid {
- ICE_RXDID_START = 0,
- ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
- ICE_RXDID_LEGACY_1,
- ICE_RXDID_FLX_START,
- ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
- ICE_RXDID_FLX_LAST = 63,
- ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
+ ICE_RXDID_LEGACY_0 = 0,
+ ICE_RXDID_LEGACY_1 = 1,
+ ICE_RXDID_FLEX_NIC = 2,
+ ICE_RXDID_FLEX_NIC_2 = 6,
+ ICE_RXDID_HW = 7,
+ ICE_RXDID_LAST = 63,
};
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
/* Receive Descriptor MDID values */
-#define ICE_RX_MDID_FLOW_ID_LOWER 5
-#define ICE_RX_MDID_FLOW_ID_HIGH 6
-#define ICE_RX_MDID_HASH_LOW 56
-#define ICE_RX_MDID_HASH_HIGH 57
+enum ice_flex_rx_mdid {
+ ICE_RX_MDID_FLOW_ID_LOWER = 5,
+ ICE_RX_MDID_FLOW_ID_HIGH,
+ ICE_RX_MDID_SRC_VSI = 19,
+ ICE_RX_MDID_HASH_LOW = 56,
+ ICE_RX_MDID_HASH_HIGH,
+};
/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
@@ -416,6 +418,7 @@ struct ice_tlan_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
@@ -471,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
+
+#define ICE_LINK_SPEED_UNKNOWN 0
+#define ICE_LINK_SPEED_10MBPS 10
+#define ICE_LINK_SPEED_100MBPS 100
+#define ICE_LINK_SPEED_1000MBPS 1000
+#define ICE_LINK_SPEED_2500MBPS 2500
+#define ICE_LINK_SPEED_5000MBPS 5000
+#define ICE_LINK_SPEED_10000MBPS 10000
+#define ICE_LINK_SPEED_20000MBPS 20000
+#define ICE_LINK_SPEED_25000MBPS 25000
+#define ICE_LINK_SPEED_40000MBPS 40000
+
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
new file mode 100644
index 000000000000..5bacad01f0c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -0,0 +1,2620 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+static int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+	/* what is the Rx queue number in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+	/* This controls whether the VLAN is stripped from inner headers.
+	 * If enabled by this flag, the VLAN in the inner L2 header is
+	 * stripped and reported in the receive descriptor.
+	 */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+		/* increasing context priority to pick up profile ID;
+		 * default is 0x01; setting to 0x03 to ensure the profile
+		 * is programmed even if the previous context is of the same
+		 * priority
+		 */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+
+ return 0;
+}
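+
+/* Illustrative sketch of the context values above (not from the hardware
+ * spec): assuming ICE_RLAN_CTX_DBUF_S is 7 (128-byte units) and
+ * ICE_MAX_CHAINED_RX_BUFS is 5, a VSI with rx_buf_len = 2048 and
+ * max_frame = 2048 programs rlan_ctx.dbuf = 2048 >> 7 = 16 and
+ * rlan_ctx.rxmax = min(2048, 5 * 2048) = 2048.
+ */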
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+	 * for vmvf_type = VF, it is VF number between 0-255
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF id */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ break;
+
+ usleep_range(10, 20);
+ }
+ if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i, j, ret = 0;
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int pf_q = vsi->rxq_map[i];
+ u32 rx_reg;
+
+ for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
+ ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ continue;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ */
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->tx_rings)
+ goto err_txrings;
+
+ vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->rx_rings)
+ goto err_rxrings;
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
+ vsi->num_q_vectors,
+ sizeof(struct ice_q_vector *),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
+ }
+
+ return 0;
+
+err_vectors:
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+err_rxrings:
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+err_txrings:
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->alloc_txq = pf->num_lan_tx;
+ vsi->alloc_rxq = pf->num_lan_rx;
+ vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
+ vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
+ break;
+ case ICE_VSI_VF:
+ vsi->alloc_txq = pf->num_vf_qps;
+ vsi->alloc_rxq = pf->num_vf_qps;
+		/* pf->num_vf_msix includes the VF miscellaneous vector in
+		 * addition to the data queue interrupts. Since
+		 * vsi->num_q_vectors is the number of queue vectors only,
+		 * subtract 1 from the original vector count.
+		 */
+ vsi->num_q_vectors = pf->num_vf_msix - 1;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_get_free_slot - get the index of the next free (NULL) slot in the array
+ * @array: array to search
+ * @size: size of the array
+ * @curr: last known occupied index to be used as a search hint
+ *
+ * void * is being used to keep the functionality generic. This lets us use this
+ * function on any array of pointers.
+ */
+static int ice_get_free_slot(void *array, int size, int curr)
+{
+ int **tmp_array = (int **)array;
+ int next;
+
+ if (curr < (size - 1) && !tmp_array[curr + 1]) {
+ next = curr + 1;
+ } else {
+ int i = 0;
+
+ while ((i < size) && (tmp_array[i]))
+ i++;
+ if (i == size)
+ next = ICE_NO_VSI;
+ else
+ next = i;
+ }
+ return next;
+}
+
+/**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx ctxt;
+ enum ice_status status;
+
+ if (vsi->type == ICE_VSI_VF)
+ ctxt.vf_num = vsi->vf_id;
+ ctxt.vsi_num = vsi->vsi_num;
+
+ memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+ status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+ if (status)
+ dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+ vsi->vsi_num);
+}
+
+/**
+ * ice_vsi_free_arrays - clean up VSI resources
+ * @vsi: pointer to VSI being cleared
+ * @free_qvectors: bool to specify if q_vectors should be deallocated
+ */
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* free the ring and vector containers */
+ if (free_qvectors && vsi->q_vectors) {
+ devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ if (vsi->tx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ }
+ if (vsi->rx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ vsi->rx_rings = NULL;
+ }
+}
+
+/**
+ * ice_vsi_clear - clean up and deallocate the provided VSI
+ * @vsi: pointer to VSI being cleared
+ *
+ * This deallocates the VSI's queue resources, removes it from the PF's
+ * VSI array if necessary, and deallocates the VSI
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_vsi_clear(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = NULL;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
+ dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
+ vsi->idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pf->sw_mutex);
+ /* updates the PF for this cleared VSI */
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+ ice_vsi_free_arrays(vsi, true);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(&pf->pdev->dev, vsi);
+
+ return 0;
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+{
+ struct ice_vsi *vsi = NULL;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+ goto unlock_pf;
+ }
+
+ vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
+
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__ICE_DOWN, vsi->state);
+ vsi->idx = pf->next_vsi;
+ vsi->work_lmt = ICE_DFLT_IRQ_WORK;
+
+ ice_vsi_set_num_qs(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+
+ /* Setup default MSIX irq handler for VSI */
+ vsi->irq_handler = ice_msix_clean_rings;
+ break;
+ case ICE_VSI_VF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ goto unlock_pf;
+ }
+
+ /* fill VSI slot in the PF struct */
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ goto unlock_pf;
+
+err_rings:
+ devm_kfree(&pf->pdev->dev, vsi);
+ vsi = NULL;
+unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+}
+
+/**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int offset, ret = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+ /* look for contiguous block of queues for Tx */
+ offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+ 0, vsi->alloc_txq, 0);
+ if (offset < ICE_MAX_TXQS) {
+ int i;
+
+ bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+ for (i = 0; i < vsi->alloc_txq; i++)
+ vsi->txq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+
+ /* look for contiguous block of queues for Rx */
+ offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+ 0, vsi->alloc_rxq, 0);
+ if (offset < ICE_MAX_RXQS) {
+ int i;
+
+ bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+ for (i = 0; i < vsi->alloc_rxq; i++)
+ vsi->rxq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i, index = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ index = find_next_zero_bit(pf->avail_txqs,
+ ICE_MAX_TXQS, index);
+ if (index < ICE_MAX_TXQS) {
+ set_bit(index, pf->avail_txqs);
+ vsi->txq_map[i] = index;
+ } else {
+ goto err_scatter_tx;
+ }
+ }
+ }
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ index = find_next_zero_bit(pf->avail_rxqs,
+ ICE_MAX_RXQS, index);
+ if (index < ICE_MAX_RXQS) {
+ set_bit(index, pf->avail_rxqs);
+ vsi->rxq_map[i] = index;
+ } else {
+ goto err_scatter_rx;
+ }
+ }
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return 0;
+
+err_scatter_rx:
+ /* unflag any queues we have grabbed (i is failed position) */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+ vsi->rxq_map[index] = 0;
+ }
+ i = vsi->alloc_txq;
+err_scatter_tx:
+ /* i is either position of failed attempt or vsi->alloc_txq */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->txq_map[index], pf->avail_txqs);
+ vsi->txq_map[index] = 0;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+ int ret = 0;
+
+ vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+ /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
+ * modes individually to scatter if assigning contiguous queues
+ * to Rx or Tx fails
+ */
+ ret = ice_vsi_get_qs_contig(vsi);
+ if (ret < 0) {
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+ ICE_MAX_SCATTER_TXQS);
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+ ICE_MAX_SCATTER_RXQS);
+ ret = ice_vsi_get_qs_scatter(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ clear_bit(vsi->txq_map[i], pf->avail_txqs);
+ vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+ vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+
+ if (vsi->rss_hkey_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+ if (vsi->rss_lut_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
+ * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+ struct ice_hw_common_caps *cap;
+ struct ice_pf *pf = vsi->back;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ vsi->rss_size = 1;
+ return;
+ }
+
+ cap = &pf->hw.func_caps.common_cap;
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ vsi->rss_table_size = cap->rss_table_size;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+		/* VF VSI gets a small RSS table.
+		 * For VSI_LUT, the LUT size should be set to 64 bytes.
+		 */
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ *
+ * This initializes a default VSI context for all sections except the Queues.
+ */
+static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+{
+ u32 table = 0;
+
+ memset(&ctxt->info, 0, sizeof(ctxt->info));
+	/* VSIs should be allocated from the shared pool */
+ ctxt->alloc_from_pool = true;
+ /* Src pruning enabled by default */
+ ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
+ /* Traffic from VSI can be sent to LAN */
+ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+	/* By default, bits 3 and 4 in vlan_flags are 0, which results in legacy
+	 * behavior (show VLAN, DEI, and UP) in the descriptor. Also, allow all
+	 * packets, untagged or tagged.
+	 */
+ ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+ ICE_AQ_VSI_VLAN_MODE_M) >>
+ ICE_AQ_VSI_VLAN_MODE_S);
+ /* Have 1:1 UP mapping for both ingress/egress tables */
+ table |= ICE_UP_TABLE_TRANSLATE(0, 0);
+ table |= ICE_UP_TABLE_TRANSLATE(1, 1);
+ table |= ICE_UP_TABLE_TRANSLATE(2, 2);
+ table |= ICE_UP_TABLE_TRANSLATE(3, 3);
+ table |= ICE_UP_TABLE_TRANSLATE(4, 4);
+ table |= ICE_UP_TABLE_TRANSLATE(5, 5);
+ table |= ICE_UP_TABLE_TRANSLATE(6, 6);
+ table |= ICE_UP_TABLE_TRANSLATE(7, 7);
+ ctxt->info.ingress_table = cpu_to_le32(table);
+ ctxt->info.egress_table = cpu_to_le32(table);
+ /* Have 1:1 UP mapping for outer to inner UP table */
+ ctxt->info.outer_up_table = cpu_to_le32(table);
+	/* No outer tag support, so outer_tag_flags remains zero */
+}
+
+/**
+ * ice_vsi_setup_q_map - Setup a VSI queue map
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ u16 offset = 0, qmap = 0, numq_tc;
+ u16 pow = 0, max_rss = 0, qcount;
+ u16 qcount_tx = vsi->alloc_txq;
+ u16 qcount_rx = vsi->alloc_rxq;
+ bool ena_tc0 = false;
+ int i;
+
+ /* at least TC0 should be enabled by default */
+ if (vsi->tc_cfg.numtc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(0)))
+ ena_tc0 = true;
+ } else {
+ ena_tc0 = true;
+ }
+
+ if (ena_tc0) {
+ vsi->tc_cfg.numtc++;
+ vsi->tc_cfg.ena_tc |= 1;
+ }
+
+ numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+
+ /* TC mapping is a function of the number of Rx queues assigned to the
+ * VSI for each traffic class and the offset of these queues.
+	 * The first 10 bits are the queue offset for TC0, and the next 4 bits
+	 * are the number of queues allocated to TC0; the number of queues is a
+	 * power of 2.
+	 *
+	 * If a TC is not enabled, its queue offset is set to 0 and it is
+	 * allocated one queue; this way, traffic for that TC is sent to the
+	 * default queue.
+ *
+ * Setup number and offset of Rx queues for all TCs for the VSI
+ */
+
+ qcount = numq_tc;
+ /* qcount will change if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_PF)
+ max_rss = ICE_MAX_LG_RSS_QS;
+ else
+ max_rss = ICE_MAX_SMALL_RSS_QS;
+ qcount = min_t(int, numq_tc, max_rss);
+ qcount = min_t(int, qcount, vsi->rss_size);
+ }
+ }
+
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = order_base_2(qcount);
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount = 1;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ /* TC is enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount = qcount;
+
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+ offset += qcount;
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ vsi->num_txq = qcount_tx;
+ vsi->num_rxq = offset;
+
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+ * in the above for loop, make num_txq equal to num_rxq.
+ */
+ vsi->num_txq = vsi->num_rxq;
+ }
+
+ /* Rx queue mapping */
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ /* q_mapping buffer holds the info for the first queue allocated for
+ * this VSI in the PF space and also the number of queues associated
+ * with this VSI.
+ */
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+}
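+
+/* Worked example for the queue map above (illustrative only): a PF VSI with
+ * 8 Rx queues, only TC0 enabled, RSS on and an RSS size of at least 8 ends
+ * up with qcount = 8 and pow = order_base_2(8) = 3, so TC0 is given
+ * qoffset = 0 and qcount = 8, tc_mapping[0] packs offset 0 with the
+ * power-of-2 exponent 3, and q_mapping[0]/q_mapping[1] carry the first
+ * absolute Rx queue and the total Rx queue count (8).
+ */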
+
+/**
+ * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 lut_type, hash_type;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ case ICE_VSI_VF:
+		/* VF VSI gets a small RSS table, which is a VSI LUT type */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ return;
+ }
+
+ ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+}
+
+/**
+ * ice_vsi_init - Create and initialize a VSI
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command to create a new VSI.
+ */
+static int ice_vsi_init(struct ice_vsi *vsi)
+{
+ struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+ /* VF number here is the absolute VF number (0-255) */
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ice_set_dflt_vsi_ctx(&ctxt);
+ /* if the switch is in VEB mode, allow VSI loopback */
+ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+
+ /* Set LUT type and HASH type if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_set_rss_vsi_ctx(&ctxt, vsi);
+
+ ctxt.info.sw_id = vsi->port_info->sw_id;
+ ice_vsi_setup_q_map(vsi, &ctxt);
+
+ ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Add VSI failed, err %d\n", ret);
+ return -EIO;
+ }
+
+ /* keep context for update VSI operations */
+ vsi->info = ctxt.info;
+
+ /* record VSI number returned */
+ vsi->vsi_num = ctxt.vsi_num;
+
+ return ret;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_ring *ring;
+
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+	/* only VSIs with an associated netdev are set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(&vsi->back->pdev->dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ /* This will not be called in the driver load path because the netdev
+	 * will not be created yet. All other cases will register the NAPI
+ * handler here (i.e. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ int err;
+
+ if (vsi->q_vectors[0]) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ num_q_vectors = vsi->num_q_vectors;
+ } else {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(&pf->pdev->dev,
+ "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ */
+static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int num_q_vectors = 0;
+
+ if (vsi->sw_base_vector || vsi->hw_base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
+ vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
+ return -EEXIST;
+ }
+
+ if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ return -ENOENT;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ num_q_vectors = vsi->num_q_vectors;
+ /* reserve slots from OS requested IRQs */
+ vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ if (vsi->sw_base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num,
+ vsi->sw_base_vector);
+ return -ENOENT;
+ }
+ pf->num_avail_sw_msix -= num_q_vectors;
+
+ /* reserve slots from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
+ case ICE_VSI_VF:
+ /* take VF misc vector and data vectors into account */
+ num_q_vectors = pf->num_vf_msix;
+ /* For VF VSI, reserve slots only from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+
+ if (vsi->hw_base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
+ if (vsi->type != ICE_VSI_VF) {
+ ice_free_res(vsi->back->sw_irq_tracker,
+ vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += num_q_vectors;
+ }
+ return -ENOENT;
+ }
+
+ pf->num_avail_hw_msix -= num_q_vectors;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
+ * @vsi: the VSI having rings deallocated
+ */
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ if (vsi->tx_rings[i]) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ }
+ }
+ }
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ if (vsi->rx_rings[i]) {
+ kfree_rcu(vsi->rx_rings[i], rcu);
+ vsi->rx_rings[i] = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
+ * @vsi: VSI which is having rings allocated
+ */
+static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* Allocate tx_rings */
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->txq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ vsi->tx_rings[i] = ring;
+ }
+
+ /* Allocate rx_rings */
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->rxq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ vsi->rx_rings[i] = ring;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_clear_rings(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+	/* initially assign the remaining ring counts to the VSI's queue counts */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
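+
+/* Illustrative distribution (not prescriptive): with num_txq = num_rxq = 5
+ * and 3 q_vectors, the DIV_ROUND_UP() scheme above hands out 2, 2 and 1 Tx
+ * rings (and likewise Rx rings) to vectors 0, 1 and 2 respectively, so the
+ * per-vector ring count never differs by more than one.
+ */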
+
+/**
+ * ice_vsi_manage_rss_lut - disable/enable RSS
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ *
+ * In the event of disable request for RSS, this function will zero out RSS
+ * LUT, while in the event of enable request for RSS, it will reconfigure RSS
+ * LUT.
+ */
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+{
+ int err = 0;
+ u8 *lut;
+
+ lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
+ GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (ena) {
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size,
+ vsi->rss_size);
+ }
+
+ err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+ devm_kfree(&vsi->back->pdev->dev, lut);
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
+ * @vsi: VSI to be configured
+ */
+static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+{
+ u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ struct ice_aqc_get_set_rss_keys *key;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int err = 0;
+ u8 *lut;
+
+ vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+
+ lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+
+ status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
+ vsi->rss_table_size);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "set_rss_lut failed, error %d\n", status);
+ err = -EIO;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(&key->standard_rss_key, seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+
+ status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ status);
+ err = -EIO;
+ }
+
+ devm_kfree(&pf->pdev->dev, key);
+ice_vsi_cfg_rss_exit:
+ devm_kfree(&pf->pdev->dev, lut);
+ return err;
+}
+
+/**
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
+ * @vsi: the VSI to be forwarded to
+ * @add_list: pointer to the list which contains MAC filter entries
+ * @macaddr: the MAC address to be added.
+ *
+ * Adds a MAC address filter entry to the temporary list
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.vsi_handle = vsi->idx;
+ ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, add_list);
+
+ return 0;
+}
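+
+/* Typical usage (a sketch, assuming ice_add_mac() from ice_switch.c): build a
+ * temporary list head, add entries with ice_add_mac_to_list(), hand the list
+ * to ice_add_mac(&pf->hw, &list) to program the filters, then release the
+ * entries with ice_free_fltr_list(), mirroring the VLAN flow in
+ * ice_vsi_add_vlan() below.
+ */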
+
+/**
+ * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
+ * @vsi: the VSI to be updated
+ */
+void ice_update_eth_stats(struct ice_vsi *vsi)
+{
+ struct ice_eth_stats *prev_es, *cur_es;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
+
+ prev_es = &vsi->eth_stats_prev;
+ cur_es = &vsi->eth_stats;
+
+ ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_bytes,
+ &cur_es->rx_bytes);
+
+ ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_unicast,
+ &cur_es->rx_unicast);
+
+ ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_multicast,
+ &cur_es->rx_multicast);
+
+ ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
+ &cur_es->rx_broadcast);
+
+ ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_discards, &cur_es->rx_discards);
+
+ ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_bytes,
+ &cur_es->tx_bytes);
+
+ ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_unicast,
+ &cur_es->tx_unicast);
+
+ ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_multicast,
+ &cur_es->tx_multicast);
+
+ ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
+ &cur_es->tx_broadcast);
+
+ ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_errors, &cur_es->tx_errors);
+
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * ice_free_fltr_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_add_mac_to_list
+ */
+void ice_free_fltr_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be added
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+ int err = 0;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+ tmp->fltr_info.vsi_handle = vsi->idx;
+ tmp->fltr_info.l_data.vlan.vlan_id = vid;
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, &tmp_add_list);
+
+ status = ice_add_vlan(&pf->hw, &tmp_add_list);
+ if (status) {
+ err = -ENODEV;
+ dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
+ vid, vsi->vsi_num);
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return err;
+}
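As a usage sketch (illustrative only; it mirrors the shape of the driver's .ndo_vlan_rx_add_vid handler rather than reproducing it):

static int example_vlan_rx_add_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* add a switch rule so this VSI receives frames tagged with vid */
	return ice_vsi_add_vlan(vsi, vid);
}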
+
+/**
+ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be removed
+ *
+ * Returns 0 on success and negative on failure
+ */
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ int status = 0;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ list->fltr_info.l_data.vlan.vlan_id = vid;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
+ dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
+ vid, vsi->vsi_num);
+ status = -EIO;
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ * Return 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+ vsi->max_frame = vsi->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = ICE_RXBUF_2048;
+
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_rxq && !err; i++)
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+
+ if (err) {
+ dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
+ return -EIO;
+ }
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Tx VSI for operation.
+ * Return 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ u16 buf_len, i, pf_q;
+ int err = 0, tc = 0;
+ u8 num_q_grps;
+
+ buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+ qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
+ err = -EINVAL;
+ goto err_cfg_txqs;
+ }
+ qg_buf->num_txqs = 1;
+ num_q_grps = 1;
+
+ /* set up and configure the Tx queues */
+ ice_for_each_txq(vsi, i) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[i];
+ ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+		/* init queue specific tail reg. It is referred to as the
+		 * transmit comm scheduler queue doorbell.
+		 */
+ vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the response
+ * This will complete configuring and enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[i]->txq_teid =
+ le32_to_cpu(txq->q_teid);
+ }
+err_cfg_txqs:
+ devm_kfree(&pf->pdev->dev, qg_buf);
+ return err;
+}
+
+/**
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
+ *
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
+ */
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+{
+ u32 val = intrl / gran;
+
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
+}
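A worked example of the conversion (values are illustrative): with a granularity of 4 usecs, a requested limit of 50 usecs becomes 50 / 4 = 12 with the enable bit set, while a limit of 0 leaves rate limiting disabled:

	u32 reg_on  = ice_intrl_usec_to_reg(50, 4); /* 12 | GLINT_RATE_INTRL_ENA_M */
	u32 reg_off = ice_intrl_usec_to_reg(0, 4);  /* 0: rate limiting disabled */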
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+ u8 itr_gran = hw->itr_gran;
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->hw_base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0, rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ ice_cfg_itr(hw, q_vector, vector);
+
+ wr32(hw, GLINT_RATE(vector),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+		/* Both the Transmit Queue Interrupt Cause Control register
+		 * and the Receive Queue Interrupt Cause Control register
+		 * expect the MSIX_INDX field to be the vector index
+		 * within the function space and not the absolute
+		 * vector index across the PF or across the device.
+		 * For SR-IOV VF VSIs the queue vector index always starts
+		 * at 1, since the first vector index (0) is used for OICR
+		 * in VF space. Since VMDq and other PF VSIs are within
+		 * the PF function space, use the vector index that is
+		 * tracked for this PF.
+		 */
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ int itr_idx = q_vector->tx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ else
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ int itr_idx = q_vector->rx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ else
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
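To make the MSIX_INDX selection above concrete (numbers are illustrative): for a VF VSI with two queue vectors, the queues of q_vector 0 are programmed with MSIX index 1 and those of q_vector 1 with index 2, because index 0 is reserved for the OICR in VF space; for a PF VSI whose hw_base_vector happens to be 5, the same queues are programmed with indices 5 and 6.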
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ } else {
+ /* Disable stripping. Leave tag in packet */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ }
+
+ /* Allow all packets untagged/tagged */
+ ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ ena, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
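A minimal sketch of how the stripping toggle is typically driven from a netdev features change (the function name is illustrative; the real hook lives in the driver's .ndo_set_features path):

static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = 0;

	/* only act when NETIF_F_HW_VLAN_CTAG_RX actually changes */
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = ice_vsi_manage_vlan_stripping(vsi, true);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = ice_vsi_manage_vlan_stripping(vsi, false);

	return ret;
}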
+
+/**
+ * ice_vsi_start_rx_rings - start VSI's Rx rings
+ * @vsi: the VSI whose rings are to be started
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, true);
+}
+
+/**
+ * ice_vsi_stop_rx_rings - stop VSI's Rx rings
+ * @vsi: the VSI
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, false);
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of the VF/VM
+ */
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 *q_teids, val;
+ u16 *q_ids, i;
+ int err = 0;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
+
+ q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
+ GFP_KERNEL);
+ if (!q_teids)
+ return -ENOMEM;
+
+ q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
+ GFP_KERNEL);
+ if (!q_ids) {
+ err = -ENOMEM;
+ goto err_alloc_q_ids;
+ }
+
+ /* set up the Tx queue list to be disabled */
+ ice_for_each_txq(vsi, i) {
+ u16 v_idx;
+
+ if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ q_ids[i] = vsi->txq_map[i];
+ q_teids[i] = vsi->tx_rings[i]->txq_teid;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector associated to
+ * the queue to schedule NAPI handler
+ */
+ v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+ wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
+ rst_src, rel_vmvf_num, NULL);
+ /* if the disable queue command was exercised during an active reset
+ * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
+ * the reset operation disables queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_info(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
+
+err_out:
+ devm_kfree(&pf->pdev->dev, q_ids);
+
+err_alloc_q_ids:
+ devm_kfree(&pf->pdev->dev, q_teids);
+
+ return err;
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct device *dev;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ dev = &vsi->back->pdev->dev;
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
+ ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+ vsi->back->hw.adminq.sq_last_status);
+ goto err_out;
+ }
+
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ devm_kfree(dev, ctxt);
+ return 0;
+
+err_out:
+ devm_kfree(dev, ctxt);
+ return -EIO;
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @type: VSI type
+ * @vf_id: defines the VF ID to which this VSI connects. This field is meant
+ *         to be used only for the ICE_VSI_VF VSI type. For other VSI types,
+ *         callers should pass ICE_INVAL_VFID.
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns a pointer to the successfully allocated and configured VSI sw struct
+ * on success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ enum ice_vsi_type type, u16 vf_id)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct device *dev = &pf->pdev->dev;
+ struct ice_vsi *vsi;
+ int ret, i;
+
+ vsi = ice_vsi_alloc(pf, type);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ vsi->port_info = pi;
+ vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
+
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_get_qs;
+ }
+
+ /* set RSS capabilities */
+ ice_vsi_set_rss_params(vsi);
+
+ /* create the VSI */
+ ret = ice_vsi_init(vsi);
+ if (ret)
+ goto unroll_get_qs;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+
+		/* Do not exit if configuring RSS had an issue; at least
+		 * traffic can still be received on the first queue. Hence
+		 * there is no need to capture the return value.
+		 */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
+ break;
+ case ICE_VSI_VF:
+		/* The VF driver will take care of creating the netdev for this
+		 * VSI type and will map queues to vectors through virtchnl;
+		 * the PF driver only creates the VSI and the corresponding
+		 * structures for bookkeeping purposes.
+		 */
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+		/* Set up the vector base only during the VF init phase or when
+		 * the VF asks for more vectors than it has been assigned. In
+		 * all other cases, assign hw_base_vector to the value given
+		 * earlier.
+		 */
+ if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ } else {
+ vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+ }
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
+ break;
+ default:
+ /* if VSI type is not recognized, clean up the resources and
+ * exit
+ */
+ goto unroll_vsi_init;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+	/* configure VSI nodes based on number of queues and TCs */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ goto unroll_vector_base;
+ }
+
+ return vsi;
+
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupt back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+ ice_vsi_clear(vsi);
+
+ return NULL;
+}
+
+/**
+ * ice_vsi_release_msix - Clear the queue-to-interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->hw_base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0;
+ u32 rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_vsi_free_irq - Free the IRQ association with the OS
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int base = vsi->sw_base_vector;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ int i;
+
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
+
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ vsi->irqs_ready = false;
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+ int irq_num;
+
+ irq_num = pf->msix_entries[vector].vector;
+
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
+
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
+ }
+ }
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->tx_rings)
+ return;
+
+ ice_for_each_txq(vsi, i)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->rx_rings)
+ return;
+
+ ice_for_each_rxq(vsi, i)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+ ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_vsi_close - Shut down a VSI
+ * @vsi: the VSI being shut down
+ */
+void ice_vsi_close(struct ice_vsi *vsi)
+{
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+ ice_down(vsi);
+
+ ice_vsi_free_irq(vsi);
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns the number of resources freed, or -EINVAL on invalid parameters
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->num_entries)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = res->search_hint;
+ int end = start;
+
+ if ((start + needed) > res->num_entries)
+ return -ENOMEM;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->num_entries)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ if (end == res->num_entries)
+ end = 0;
+
+ res->search_hint = end;
+ return start;
+ }
+ } while (1);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same-sized requests.
+ * Linear search time and any fragmentation should be minimal.
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int ret;
+
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ /* search based on search_hint */
+ ret = ice_search_res(res, needed, id);
+
+ if (ret < 0) {
+ /* previous search failed. Reset search hint and try again */
+ res->search_hint = 0;
+ ret = ice_search_res(res, needed, id);
+ }
+
+ return ret;
+}
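For illustration, the expected get/free pairing on a resource tracker (the block size and owner ID below are made up; the real callers pass values such as vsi->num_q_vectors and vsi->idx):

	int base;

	/* reserve 4 contiguous entries, owned by id 7 */
	base = ice_get_res(pf, pf->sw_irq_tracker, 4, 7);
	if (base < 0)
		return base;

	/* ... use tracker entries base .. base + 3 ... */

	/* return the block; entries are freed while they still match id 7 */
	ice_free_res(pf->sw_irq_tracker, (u16)base, 7);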
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ int base = vsi->sw_base_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each queue */
+ if (vsi->tx_rings) {
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->tx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_TQCTL(reg));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(reg), val);
+ }
+ }
+ }
+
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ for (i = vsi->hw_base_vector;
+ i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
+ wr32(hw, GLINT_DYN_CTL(i), 0);
+
+ ice_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ }
+}
+
+/**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+ struct ice_vf *vf;
+
+ if (!vsi->back)
+ return -ENODEV;
+ pf = vsi->back;
+ vf = &pf->vf[vsi->vf_id];
+	/* do not unregister and free netdevs while driver is in the reset
+	 * recovery pending state. Since the reset/rebuild happens through the
+	 * PF service task workqueue, it's not a good idea to unregister a
+	 * netdev that is associated with the PF that is currently running the
+	 * work queue items. This is done to avoid a check_flush_dependency()
+	 * warning on this wq
+	 */
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_rss_clean(vsi);
+
+ /* Disable VSI and free resources */
+ ice_vsi_dis_irq(vsi);
+ ice_vsi_close(vsi);
+
+ /* reclaim interrupt vectors back to PF */
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+ vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+		/* Reclaim VF resources back only when freeing all VFs or
+		 * when vector reassignment is requested
+		 */
+ ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_vsi_delete(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_clear_rings(vsi);
+
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+
+	/* retain the SW VSI data structure since it is needed to unregister
+	 * and free the VSI netdev when the PF is not in a reset recovery
+	 * pending state, for example during rmmod.
+	 */
+ if (!ice_is_reset_in_progress(pf->state))
+ ice_vsi_clear(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int ret, i;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ice_vsi_free_q_vectors(vsi);
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ vsi->sw_base_vector = 0;
+ vsi->hw_base_vector = 0;
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_arrays(vsi, false);
+ ice_dev_onetime_setup(&vsi->back->hw);
+ ice_vsi_set_num_qs(vsi);
+
+ /* Initialize VSI struct elements and create VSI in FW */
+ ret = ice_vsi_init(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ret = ice_vsi_alloc_arrays(vsi, false);
+ if (ret < 0)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ break;
+ case ICE_VSI_VF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->back->q_left_tx -= vsi->alloc_txq;
+ vsi->back->q_left_rx -= vsi->alloc_rxq;
+ break;
+ default:
+ break;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+	/* configure VSI nodes based on number of queues and TCs */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed VSI lan queue config\n");
+ goto err_vectors;
+ }
+ return 0;
+
+err_vectors:
+ ice_vsi_free_q_vectors(vsi);
+err_rings:
+ if (vsi->netdev) {
+ vsi->current_netdev_flags = 0;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_vsi:
+ ice_vsi_clear(vsi);
+ set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ return ret;
+}
+
+/**
+ * ice_is_reset_in_progress - check for a reset in progress
+ * @state: pf state field
+ */
+bool ice_is_reset_in_progress(unsigned long *state)
+{
+ return test_bit(__ICE_RESET_OICR_RECV, state) ||
+ test_bit(__ICE_PFR_REQ, state) ||
+ test_bit(__ICE_CORER_REQ, state) ||
+ test_bit(__ICE_GLOBR_REQ, state);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
new file mode 100644
index 000000000000..3831b4f0960a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_LIB_H_
+#define _ICE_LIB_H_
+
+#include "ice.h"
+
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr);
+
+void ice_free_fltr_list(struct device *dev, struct list_head *h);
+
+void ice_update_eth_stats(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_msix(struct ice_vsi *vsi);
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
+
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
+
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num);
+
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+
+void ice_vsi_delete(struct ice_vsi *vsi);
+
+int ice_vsi_clear(struct ice_vsi *vsi);
+
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ enum ice_vsi_type type, u16 vf_id);
+
+int ice_vsi_release(struct ice_vsi *vsi);
+
+void ice_vsi_close(struct ice_vsi *vsi);
+
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
+
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
+
+int ice_vsi_rebuild(struct ice_vsi *vsi);
+
+bool ice_is_reset_in_progress(unsigned long *state);
+
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+
+void ice_vsi_put_qs(struct ice_vsi *vsi);
+
+void ice_vsi_dis_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
+
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+
+#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3f047bb43348..05993451147a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,8 +6,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_lib.h"
-#define DRV_VERSION "ice-0.7.0-k"
+#define DRV_VERSION "0.7.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -15,7 +16,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static int debug = -1;
@@ -31,173 +32,84 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
-static int ice_vsi_release(struct ice_vsi *vsi);
+
+static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
/**
- * ice_get_free_slot - get the next non-NULL location index in array
- * @array: array to search
- * @size: size of the array
- * @curr: last known occupied index to be used as a search hint
- *
- * void * is being used to keep the functionality generic. This lets us use this
- * function on any array of pointers.
+ * ice_get_tx_pending - returns number of Tx descriptors not processed
+ * @ring: the ring of descriptors
*/
-static int ice_get_free_slot(void *array, int size, int curr)
+static u32 ice_get_tx_pending(struct ice_ring *ring)
{
- int **tmp_array = (int **)array;
- int next;
+ u32 head, tail;
- if (curr < (size - 1) && !tmp_array[curr + 1]) {
- next = curr + 1;
- } else {
- int i = 0;
+ head = ring->next_to_clean;
+ tail = readl(ring->tail);
- while ((i < size) && (tmp_array[i]))
- i++;
- if (i == size)
- next = ICE_NO_VSI;
- else
- next = i;
- }
- return next;
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+ return 0;
}
/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- * Returns the base item index of the block, or -ENOMEM for error
+ * ice_check_for_hang_subtask - check for and recover hung queues
+ * @pf: pointer to PF struct
*/
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
- int start = res->search_hint;
- int end = start;
-
- id |= ICE_RES_VALID_BIT;
+ struct ice_vsi *vsi = NULL;
+ unsigned int i;
+ u32 v, v_idx;
+ int packets;
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->num_entries)
- break;
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
+ vsi = pf->vsi[v];
+ break;
}
- if (end == (start + needed)) {
- int i = start;
+ if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ return;
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
+ if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
+ return;
- if (end == res->num_entries)
- end = 0;
+ for (i = 0; i < vsi->num_txq; i++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[i];
+
+ if (tx_ring && tx_ring->desc) {
+ int itr = ICE_ITR_NONE;
+
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.pkts & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt == packets) {
+ /* Trigger sw interrupt to revive the queue */
+ v_idx = tx_ring->q_vector->v_idx;
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
+ (itr << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ continue;
+ }
- res->search_hint = end;
- return start;
+ /* Memory barrier between read of packet count and call
+ * to ice_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt =
+ ice_get_tx_pending(tx_ring) ? packets : -1;
}
- } while (1);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
- */
-static int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- int ret;
-
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(&pf->pdev->dev,
- "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- /* search based on search_hint */
- ret = ice_search_res(res, needed, id);
-
- if (ret < 0) {
- /* previous search failed. Reset search hint and try again */
- res->search_hint = 0;
- ret = ice_search_res(res, needed, id);
}
-
- return ret;
-}
-
-/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- * Returns number of resources freed
- */
-static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->num_entries)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->num_entries && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds mac address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
-
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, add_list);
-
- return 0;
}
/**
@@ -243,24 +155,6 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
}
/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-static void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
- struct ice_fltr_list_entry *e, *tmp;
-
- list_for_each_entry_safe(e, tmp, h, list_entry) {
- list_del(&e->list_entry);
- devm_kfree(dev, e);
- }
-}
-
-/**
* ice_vsi_fltr_changed - check if filter state changed
* @vsi: VSI to be checked
*
@@ -359,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i tx rule\n",
@@ -369,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
/* Apply RX filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i rx rule\n",
@@ -380,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
@@ -389,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
err = -EIO;
goto out_promisc;
}
- /* Clear filter RX to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ /* Clear RX filter to remove traffic from wire */
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
@@ -438,15 +332,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
- * ice_is_reset_recovery_pending - schedule a reset
- * @state: pf state field
- */
-static bool ice_is_reset_recovery_pending(unsigned long int *state)
-{
- return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
-}
-
-/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -456,23 +341,17 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- u32 v;
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
-
- dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
- /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */
ice_pf_dis_all_vsi(pf);
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- pf->vsi[v]->vsi_num = 0;
-
ice_shutdown_all_ctrlq(hw);
+
+ set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}
/**
@@ -489,27 +368,29 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
WARN_ON(in_interrupt());
- /* PFR is a bit of a special case because it doesn't result in an OICR
- * interrupt. So for PFR, we prepare for reset, issue the reset and
- * rebuild sequentially.
- */
- if (reset_type == ICE_RESET_PFR) {
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- ice_prepare_for_reset(pf);
- }
+ ice_prepare_for_reset(pf);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
dev_err(dev, "reset %d failed\n", reset_type);
set_bit(__ICE_RESET_FAILED, pf->state);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
+ clear_bit(__ICE_CORER_REQ, pf->state);
+ clear_bit(__ICE_GLOBR_REQ, pf->state);
return;
}
+ /* PFR is a bit of a special case because it doesn't result in an OICR
+ * interrupt. So for PFR, rebuild after the reset and clear the reset-
+ * associated state bits.
+ */
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
}
}
@@ -519,77 +400,60 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
*/
static void ice_reset_subtask(struct ice_pf *pf)
{
- enum ice_reset_req reset_type;
-
- rtnl_lock();
+ enum ice_reset_req reset_type = ICE_RESET_INVAL;
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
- * OICR interrupt. The OICR handler (ice_misc_intr) determines what
- * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
- * pf->state. So if reset/recovery is pending (as indicated by this bit)
- * we do a rebuild and return.
+ * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
+ * of reset is pending and sets bits in pf->state indicating the reset
+	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
+	 * prepare for the pending reset if not already prepared (for PF
+	 * software-initiated global resets the software should already be
+	 * prepared for it, as indicated by __ICE_PREPARED_FOR_RESET; for
+	 * global resets initiated by firmware or by software on other PFs,
+	 * that bit is not set, so prepare for the reset now), then poll for
+	 * reset done, rebuild and return.
*/
- if (ice_is_reset_recovery_pending(pf->state)) {
+ if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
clear_bit(__ICE_GLOBR_RECV, pf->state);
clear_bit(__ICE_CORER_RECV, pf->state);
- ice_prepare_for_reset(pf);
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
- if (ice_check_reset(&pf->hw))
+ if (ice_check_reset(&pf->hw)) {
set_bit(__ICE_RESET_FAILED, pf->state);
- else
+ } else {
+ /* done with reset. start rebuild */
+ pf->hw.reset_ongoing = false;
ice_rebuild(pf);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- goto unlock;
+ /* clear bit to resume normal operations, but
+			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
+ */
+ clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
+ clear_bit(__ICE_CORER_REQ, pf->state);
+ clear_bit(__ICE_GLOBR_REQ, pf->state);
+ }
+
+ return;
}
/* No pending resets to finish processing. Check for new resets */
- if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
- reset_type = ICE_RESET_GLOBR;
- else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
- reset_type = ICE_RESET_CORER;
- else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+ if (test_bit(__ICE_PFR_REQ, pf->state))
reset_type = ICE_RESET_PFR;
- else
- goto unlock;
+ if (test_bit(__ICE_CORER_REQ, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_bit(__ICE_GLOBR_REQ, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* If no valid reset type requested just return */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
- /* reset if not already down or resetting */
+ /* reset if not already down or busy */
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_bit(__ICE_CFG_BUSY, pf->state)) {
ice_do_reset(pf, reset_type);
}
-
-unlock:
- rtnl_unlock();
-}
-
-/**
- * ice_watchdog_subtask - periodic tasks not using event driven scheduling
- * @pf: board private structure
- */
-static void ice_watchdog_subtask(struct ice_pf *pf)
-{
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__ICE_DOWN, pf->state) ||
- test_bit(__ICE_CFG_BUSY, pf->state))
- return;
-
- /* make sure we don't do these things too often */
- if (time_before(jiffies,
- pf->serv_tmr_prev + pf->serv_tmr_period))
- return;
-
- pf->serv_tmr_prev = jiffies;
-
- /* Update the stats for active netdevs so the network stack
- * can look at updated numbers whenever it cares to
- */
- ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- ice_update_vsi_stats(pf->vsi[i]);
}
/**
@@ -662,36 +526,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_init_link_events - enable/initialize link events
- * @pi: pointer to the port_info instance
- *
- * Returns -EIO on failure, 0 on success
- */
-static int ice_init_link_events(struct ice_port_info *pi)
-{
- u16 mask;
-
- mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
- ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
-
- if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
* ice_vsi_link_event - update the vsi's netdev
* @vsi: the vsi on which the link event occurred
* @link_up: whether or not the vsi needs to be set up or down
@@ -772,31 +606,41 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
+ ice_vc_notify_link_state(pf);
+
return 0;
}
/**
- * ice_handle_link_event - handle link event via ARQ
- * @pf: pf that the link event is associated with
- *
- * Return -EINVAL if port_info is null
- * Return status on succes
+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling
+ * @pf: board private structure
*/
-static int ice_handle_link_event(struct ice_pf *pf)
+static void ice_watchdog_subtask(struct ice_pf *pf)
{
- struct ice_port_info *port_info;
- int status;
+ int i;
- port_info = pf->hw.port_info;
- if (!port_info)
- return -EINVAL;
+ /* if interface is down do nothing */
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ test_bit(__ICE_CFG_BUSY, pf->state))
+ return;
- status = ice_link_event(pf, port_info);
- if (status)
- dev_dbg(&pf->pdev->dev,
- "Could not process link event, error %d\n", status);
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies,
+ pf->serv_tmr_prev + pf->serv_tmr_period))
+ return;
- return status;
+ pf->serv_tmr_prev = jiffies;
+
+ if (ice_link_event(pf, pf->hw.port_info))
+ dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ ice_update_pf_stats(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ ice_update_vsi_stats(pf->vsi[i]);
}
/**
@@ -822,6 +666,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ qtype = "Mailbox";
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
@@ -898,10 +746,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
- case ice_aqc_opc_get_link_status:
- if (ice_handle_link_event(pf))
- dev_err(&pf->pdev->dev,
- "Could not handle link event\n");
+ case ice_mbx_opc_send_msg_to_pf:
+ ice_vc_process_vf_msg(pf, &event);
+ break;
+ case ice_aqc_opc_fw_logging:
+ ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
default:
dev_dbg(&pf->pdev->dev,
@@ -959,6 +808,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_mailboxq_subtask - clean the MailboxQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
+ return;
+
+ clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->mailboxq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
+
+ ice_flush(hw);
+}
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -966,8 +837,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
*/
static void ice_service_task_schedule(struct ice_pf *pf)
{
- if (!test_bit(__ICE_DOWN, pf->state) &&
- !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
+ if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
+ !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
+ !test_bit(__ICE_NEEDS_RESTART, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
@@ -985,6 +857,22 @@ static void ice_service_task_complete(struct ice_pf *pf)
}
/**
+ * ice_service_task_stop - stop service task and cancel works
+ * @pf: board private structure
+ */
+static void ice_service_task_stop(struct ice_pf *pf)
+{
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (pf->serv_tmr.function)
+ del_timer_sync(&pf->serv_tmr);
+ if (pf->serv_task.func)
+ cancel_work_sync(&pf->serv_task);
+
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -997,6 +885,160 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_handle_mdd_event - handle malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from service task. OICR interrupt handler indicates MDD event
+ */
+static void ice_handle_mdd_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, GL_MDET_TX_PQM);
+ if (reg & GL_MDET_TX_PQM_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
+ GL_MDET_TX_PQM_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
+ GL_MDET_TX_PQM_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
+ GL_MDET_TX_PQM_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
+ GL_MDET_TX_PQM_QNUM_S);
+
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_TX_TCLAN);
+ if (reg & GL_MDET_TX_TCLAN_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
+ GL_MDET_TX_TCLAN_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
+ GL_MDET_TX_TCLAN_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
+ GL_MDET_TX_TCLAN_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
+ GL_MDET_TX_TCLAN_QNUM_S);
+
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_RX);
+ if (reg & GL_MDET_RX_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
+ GL_MDET_RX_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
+ GL_MDET_RX_VF_NUM_S;
+ u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
+ GL_MDET_RX_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
+ GL_MDET_RX_QNUM_S);
+
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ bool pf_mdd_detected = false;
+
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+		/* Queue belongs to the PF, initiate a reset */
+ if (pf_mdd_detected) {
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ ice_service_task_schedule(pf);
+ }
+ }
+
+ /* see if one of the VFs needs to be reset */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ reg = rd32(hw, VP_MDET_TX_PQM(i));
+ if (reg & VP_MDET_TX_PQM_VALID_M) {
+ wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ if (reg & VP_MDET_TX_TDPU_VALID_M) {
+ wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_RX(i));
+ if (reg & VP_MDET_RX_VALID_M) {
+ wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ }
+
+ if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ }
+ }
+
+ /* re-enable MDD interrupt cause */
+ clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_MAL_DETECT_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+}
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1010,16 +1052,21 @@ static void ice_service_task(struct work_struct *work)
/* process reset requests first */
ice_reset_subtask(pf);
- /* bail if a reset/recovery cycle is pending */
- if (ice_is_reset_recovery_pending(pf->state) ||
- test_bit(__ICE_SUSPENDED, pf->state)) {
+ /* bail if a reset/recovery cycle is pending or rebuild failed */
+ if (ice_is_reset_in_progress(pf->state) ||
+ test_bit(__ICE_SUSPENDED, pf->state) ||
+ test_bit(__ICE_NEEDS_RESTART, pf->state)) {
ice_service_task_complete(pf);
return;
}
+ ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
+ ice_handle_mdd_event(pf);
+ ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
+ ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1029,6 +1076,9 @@ static void ice_service_task(struct work_struct *work)
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
+ test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -1043,6 +1093,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
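+ /* the mailbox control queue carries PF <-> VF messages */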
+ hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
/**
@@ -1073,57 +1127,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-static void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int base = vsi->base_vector;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->base_vector;
- i < (vsi->num_q_vectors + vsi->base_vector); i++)
- wr32(hw, GLINT_DYN_CTL(i), 0);
-
- ice_flush(hw);
- for (i = 0; i < vsi->num_q_vectors; i++)
- synchronize_irq(pf->msix_entries[i + base].vector);
- }
-}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -1144,26 +1147,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
- * @vsi: pointer to VSI being removed
- */
-static void ice_vsi_delete(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_vsi_ctx ctxt;
- enum ice_status status;
-
- ctxt.vsi_num = vsi->vsi_num;
-
- memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
-
- status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
- if (status)
- dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
- vsi->vsi_num);
-}
-
-/**
* ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -1172,7 +1155,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
+ int base = vsi->sw_base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
@@ -1231,467 +1214,6 @@ free_q_irqs:
}
/**
- * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
- * @vsi: the VSI being configured
- */
-static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
-{
- struct ice_hw_common_caps *cap;
- struct ice_pf *pf = vsi->back;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- vsi->rss_size = 1;
- return;
- }
-
- cap = &pf->hw.func_caps.common_cap;
- switch (vsi->type) {
- case ICE_VSI_PF:
- /* PF VSI will inherit RSS instance of PF */
- vsi->rss_table_size = cap->rss_table_size;
- vsi->rss_size = min_t(int, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- break;
- }
-}
-
-/**
- * ice_vsi_setup_q_map - Setup a VSI queue map
- * @vsi: the VSI being configured
- * @ctxt: VSI context structure
- */
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
-{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
- u16 qcount_tx = vsi->alloc_txq;
- u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
- int i;
-
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
- }
-
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
-
- /* TC mapping is a function of the number of Rx queues assigned to the
- * VSI for each traffic class and the offset of these queues.
- * The first 10 bits are the queue offset for TC0, and the next 4 bits are
- * the number of queues allocated to TC0. The number of queues is a
- * power of 2.
- *
- * If a TC is not enabled, its queue offset is set to 0 and one queue is
- * allocated; this way, traffic for that TC is sent to the default queue.
- *
- * Setup number and offset of Rx queues for all TCs for the VSI
- */
-
- /* qcount will change if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_SMALL_RSS_QS;
-
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
- } else {
- qcount = numq_tc;
- }
-
- /* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
-
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
- /* TC is not enabled */
- vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
- ctxt->info.tc_mapping[i] = 0;
- continue;
- }
-
- /* TC is enabled */
- vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
-
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
- ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
- }
-
- vsi->num_txq = qcount_tx;
- vsi->num_rxq = offset;
-
- /* Rx queue mapping */
- ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
- /* q_mapping buffer holds the info for the first queue allocated for
- * this VSI in the PF space and also the number of queues associated
- * with this VSI.
- */
- ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
- ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
-}
-
-/**
- * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
- * @ctxt: the VSI context being set
- *
- * This initializes a default VSI context for all sections except the Queues.
- */
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
-{
- u32 table = 0;
-
- memset(&ctxt->info, 0, sizeof(ctxt->info));
- /* VSIs should be allocated from the shared pool */
- ctxt->alloc_from_pool = true;
- /* Src pruning enabled by default */
- ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
- /* Traffic from VSI can be sent to LAN */
- ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
- */
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
-
- /* Have 1:1 UP mapping for both ingress/egress tables */
- table |= ICE_UP_TABLE_TRANSLATE(0, 0);
- table |= ICE_UP_TABLE_TRANSLATE(1, 1);
- table |= ICE_UP_TABLE_TRANSLATE(2, 2);
- table |= ICE_UP_TABLE_TRANSLATE(3, 3);
- table |= ICE_UP_TABLE_TRANSLATE(4, 4);
- table |= ICE_UP_TABLE_TRANSLATE(5, 5);
- table |= ICE_UP_TABLE_TRANSLATE(6, 6);
- table |= ICE_UP_TABLE_TRANSLATE(7, 7);
- ctxt->info.ingress_table = cpu_to_le32(table);
- ctxt->info.egress_table = cpu_to_le32(table);
- /* Have 1:1 UP mapping for outer to inner UP table */
- ctxt->info.outer_up_table = cpu_to_le32(table);
- /* No outer tag support; outer_tag_flags remains zero */
-}
-
-/**
- * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
- * @ctxt: the VSI context being set
- * @vsi: the VSI being configured
- */
-static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
-{
- u8 lut_type, hash_type;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- /* PF VSI will inherit RSS instance of PF */
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- return;
- }
-
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-}
-
-/**
- * ice_vsi_add - Create a new VSI or fetch preallocated VSI
- * @vsi: the VSI being configured
- *
- * This initializes a VSI context depending on the VSI type to be added and
- * passes it down to the add_vsi aq command to create a new VSI.
- */
-static int ice_vsi_add(struct ice_vsi *vsi)
-{
- struct ice_vsi_ctx ctxt = { 0 };
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- ctxt.flags = ICE_AQ_VSI_TYPE_PF;
- break;
- default:
- return -ENODEV;
- }
-
- ice_set_dflt_vsi_ctx(&ctxt);
- /* if the switch is in VEB mode, allow VSI loopback */
- if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
- ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
-
- /* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_set_rss_vsi_ctx(&ctxt, vsi);
-
- ctxt.info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, &ctxt);
-
- ret = ice_aq_add_vsi(hw, &ctxt, NULL);
- if (ret) {
- dev_err(&vsi->back->pdev->dev,
- "Add VSI AQ call failed, err %d\n", ret);
- return -EIO;
- }
- vsi->info = ctxt.info;
- vsi->vsi_num = ctxt.vsi_num;
-
- return ret;
-}
-
-/**
- * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
- * @vsi: the VSI being cleaned up
- */
-static void ice_vsi_release_msix(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
- struct ice_hw *hw = &pf->hw;
- u32 txq = 0;
- u32 rxq = 0;
- int i, q;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
- for (q = 0; q < q_vector->num_ring_tx; q++) {
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
- txq++;
- }
-
- for (q = 0; q < q_vector->num_ring_rx; q++) {
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
- rxq++;
- }
- }
-
- ice_flush(hw);
-}
-
-/**
- * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
- * @vsi: the VSI having rings deallocated
- */
-static void ice_vsi_clear_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- if (vsi->tx_rings[i]) {
- kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- }
- }
- }
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- if (vsi->rx_rings[i]) {
- kfree_rcu(vsi->rx_rings[i], rcu);
- vsi->rx_rings[i] = NULL;
- }
- }
- }
-}
-
-/**
- * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
- * @vsi: VSI which is having rings allocated
- */
-static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i;
-
- /* Allocate tx_rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
-
- /* allocate with kzalloc(), free with kfree_rcu() */
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-
- if (!ring)
- goto err_out;
-
- ring->q_index = i;
- ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
- ring->vsi = vsi;
- ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
-
- vsi->tx_rings[i] = ring;
- }
-
- /* Allocate rx_rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
-
- /* allocate with kzalloc(), free with kfree_rcu() */
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_out;
-
- ring->q_index = i;
- ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
- ring->vsi = vsi;
- ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
- vsi->rx_rings[i] = ring;
- }
-
- return 0;
-
-err_out:
- ice_vsi_clear_rings(vsi);
- return -ENOMEM;
-}
-
-/**
- * ice_vsi_free_irq - Free the irq association with the OS
- * @vsi: the VSI being configured
- */
-static void ice_vsi_free_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- if (!vsi->q_vectors || !vsi->irqs_ready)
- return;
-
- vsi->irqs_ready = false;
- for (i = 0; i < vsi->num_q_vectors; i++) {
- u16 vector = i + base;
- int irq_num;
-
- irq_num = pf->msix_entries[vector].vector;
-
- /* free only the irqs that were actually requested */
- if (!vsi->q_vectors[i] ||
- !(vsi->q_vectors[i]->num_ring_tx ||
- vsi->q_vectors[i]->num_ring_rx))
- continue;
-
- /* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
- }
- ice_vsi_release_msix(vsi);
- }
-}
-
-/**
- * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
- * @vsi: the VSI being configured
- */
-static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
- struct ice_hw *hw = &pf->hw;
- u32 txq = 0, rxq = 0;
- int i, q, itr;
- u8 itr_gran;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- itr_gran = hw->itr_gran_200;
-
- if (q_vector->num_ring_rx) {
- q_vector->rx.itr =
- ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
- itr_gran);
- q_vector->rx.latency_range = ICE_LOW_LATENCY;
- }
-
- if (q_vector->num_ring_tx) {
- q_vector->tx.itr =
- ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
- itr_gran);
- q_vector->tx.latency_range = ICE_LOW_LATENCY;
- }
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
-
- /* Both Transmit Queue Interrupt Cause Control register
- * and Receive Queue Interrupt Cause control register
- * expects MSIX_INDX field to be the vector index
- * within the function space and not the absolute
- * vector index across PF or across device.
- * For SR-IOV VF VSIs the queue vector index always starts
- * at 1, since the first vector index (0) is used for OICR
- * in VF space. Since VMDq and other PF VSIs are within
- * the PF function space, use the vector index that is
- * tracked for this PF.
- */
- for (q = 0; q < q_vector->num_ring_tx; q++) {
- u32 val;
-
- itr = ICE_TX_ITR;
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
- txq++;
- }
-
- for (q = 0; q < q_vector->num_ring_rx; q++) {
- u32 val;
-
- itr = ICE_RX_ITR;
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
- rxq++;
- }
- }
-
- ice_flush(hw);
-}
-
-/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -1708,13 +1230,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1731,12 +1254,23 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+ set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
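+ /* for MDD and VFLR events, mask the cause and let the service task handle them */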
+ if (oicr & PFINT_OICR_MAL_DETECT_M) {
+ ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
+ set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ }
+ if (oicr & PFINT_OICR_VFLR_M) {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
+
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
+
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
@@ -1746,15 +1280,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
pf->corer_count++;
else if (reset == ICE_RESET_GLOBR)
pf->globr_count++;
- else
+ else if (reset == ICE_RESET_EMPR)
pf->empr_count++;
+ else
+ dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
+ reset);
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
- if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {
+ if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
set_bit(__ICE_CORER_RECV, pf->state);
else if (reset == ICE_RESET_GLOBR)
@@ -1762,7 +1299,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
else
set_bit(__ICE_EMPR_RECV, pf->state);
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ /* There are a couple of different bits at play here.
+ * hw->reset_ongoing indicates whether the hardware is
+ * in reset. This is set to true when a reset interrupt
+ * is received and set back to false after the driver
+ * has determined that the hardware is out of reset.
+ *
+ * __ICE_RESET_OICR_RECV in pf->state indicates
+ * that a post reset rebuild is required before the
+ * driver is operational again. This is set above.
+ *
+ * As this is the start of the reset/rebuild cycle, set
+ * both to indicate that.
+ */
+ hw->reset_ongoing = true;
}
}
@@ -1803,208 +1353,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assign the remaining ring counts to the VSI's queue counts */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
-
-/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
- * @vsi: the VSI being configured
- *
- * Determines queue, descriptor, and vector counts based on the VSI type
- */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- vsi->alloc_txq = pf->num_lan_tx;
- vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
- vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- break;
- }
-}
-
-/**
- * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
- *
- * On error: returns error code (negative)
- * On success: returns 0
- */
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
-{
- struct ice_pf *pf = vsi->back;
-
- /* allocate memory for both Tx and Rx ring pointers */
- vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(struct ice_ring *), GFP_KERNEL);
- if (!vsi->tx_rings)
- goto err_txrings;
-
- vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(struct ice_ring *), GFP_KERNEL);
- if (!vsi->rx_rings)
- goto err_rxrings;
-
- if (alloc_qvectors) {
- /* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
- vsi->num_q_vectors,
- sizeof(struct ice_q_vector *),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- goto err_vectors;
- }
-
- return 0;
-
-err_vectors:
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
- return -ENOMEM;
-}
-
-/**
- * ice_msix_clean_rings - MSIX mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- */
-static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ice_vsi_alloc - Allocates the next available struct vsi in the PF
- * @pf: board private structure
- * @type: type of VSI
- *
- * returns a pointer to a VSI on success, NULL on failure.
- */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
-{
- struct ice_vsi *vsi = NULL;
-
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
-
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
- goto unlock_pf;
- }
-
- vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
-
- vsi->type = type;
- vsi->back = pf;
- set_bit(__ICE_DOWN, vsi->state);
- vsi->idx = pf->next_vsi;
- vsi->work_lmt = ICE_DFLT_IRQ_WORK;
-
- ice_vsi_set_num_qs(vsi);
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi, true))
- goto err_rings;
-
- /* Setup default MSIX irq handler for VSI */
- vsi->irq_handler = ice_msix_clean_rings;
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
- }
-
- /* fill VSI slot in the PF struct */
- pf->vsi[pf->next_vsi] = vsi;
-
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
- goto unlock_pf;
-
-err_rings:
- devm_kfree(&pf->pdev->dev, vsi);
- vsi = NULL;
-unlock_pf:
- mutex_unlock(&pf->sw_mutex);
- return vsi;
-}
-
-/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
@@ -2015,12 +1363,15 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_flush(&pf->hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector, pf);
+ pf->msix_entries[pf->sw_oicr_idx].vector, pf);
}
- ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
@@ -2047,42 +1398,61 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
* lost during reset. Note that this function is called only during
* rebuild path and not while reset is in progress.
*/
- if (ice_is_reset_recovery_pending(pf->state))
+ if (ice_is_reset_in_progress(pf->state))
goto skip_req_irq;
- /* reserve one vector in irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ /* reserve one vector in sw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
- pf->oicr_idx = oicr_idx;
+ pf->num_avail_sw_msix -= 1;
+ pf->sw_oicr_idx = oicr_idx;
+
+ /* reserve one vector in hw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ if (oicr_idx < 0) {
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ return oicr_idx;
+ }
+ pf->num_avail_hw_msix -= 1;
+ pf->hw_oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector,
+ pf->msix_entries[pf->sw_oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
- val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
- itr_gran = hw->itr_gran_200;
+ /* This enables Mailbox queue Interrupt causes */
+ val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_MBX_CTL, val);
+
+ itr_gran = hw->itr_gran;
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran));
ice_flush(hw);
@@ -2092,209 +1462,43 @@ skip_req_irq:
}
/**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
*/
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static void ice_napi_del(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- int offset, ret = 0;
-
- mutex_lock(&pf->avail_q_mutex);
- /* look for contiguous block of queues for tx */
- offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
- 0, vsi->alloc_txq, 0);
- if (offset < ICE_MAX_TXQS) {
- int i;
-
- bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
- for (i = 0; i < vsi->alloc_txq; i++)
- vsi->txq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
-
- /* look for contiguous block of queues for rx */
- offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
- 0, vsi->alloc_rxq, 0);
- if (offset < ICE_MAX_RXQS) {
- int i;
-
- bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
- for (i = 0; i < vsi->alloc_rxq; i++)
- vsi->rxq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
- mutex_unlock(&pf->avail_q_mutex);
-
- return ret;
-}
-
-/**
- * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i, index = 0;
-
- mutex_lock(&pf->avail_q_mutex);
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- index = find_next_zero_bit(pf->avail_txqs,
- ICE_MAX_TXQS, index);
- if (index < ICE_MAX_TXQS) {
- set_bit(index, pf->avail_txqs);
- vsi->txq_map[i] = index;
- } else {
- goto err_scatter_tx;
- }
- }
- }
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- index = find_next_zero_bit(pf->avail_rxqs,
- ICE_MAX_RXQS, index);
- if (index < ICE_MAX_RXQS) {
- set_bit(index, pf->avail_rxqs);
- vsi->rxq_map[i] = index;
- } else {
- goto err_scatter_rx;
- }
- }
- }
-
- mutex_unlock(&pf->avail_q_mutex);
- return 0;
+ int v_idx;
-err_scatter_rx:
- /* unflag any queues we have grabbed (i is failed position) */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
- vsi->rxq_map[index] = 0;
- }
- i = vsi->alloc_txq;
-err_scatter_tx:
- /* i is either position of failed attempt or vsi->alloc_txq */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->txq_map[index], pf->avail_txqs);
- vsi->txq_map[index] = 0;
- }
+ if (!vsi->netdev)
+ return;
- mutex_unlock(&pf->avail_q_mutex);
- return -ENOMEM;
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
/**
- * ice_vsi_get_qs - Assign queues from PF to VSI
- * @vsi: the VSI to assign queues to
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
*
- * Returns 0 on success and a negative value on error
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
*/
-static int ice_vsi_get_qs(struct ice_vsi *vsi)
+static void ice_napi_add(struct ice_vsi *vsi)
{
- int ret = 0;
-
- vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
- vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
-
- /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
- * modes individually to scatter if assigning contiguous queues
- * to rx or tx fails
- */
- ret = ice_vsi_get_qs_contig(vsi);
- if (ret < 0) {
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
- ICE_MAX_SCATTER_TXQS);
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
- ICE_MAX_SCATTER_RXQS);
- ret = ice_vsi_get_qs_scatter(vsi);
- }
-
- return ret;
-}
-
-/**
- * ice_vsi_put_qs - Release queues from VSI to PF
- * @vsi: the VSI that's going to release queues
- */
-static void ice_vsi_put_qs(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i;
-
- mutex_lock(&pf->avail_q_mutex);
-
- for (i = 0; i < vsi->alloc_txq; i++) {
- clear_bit(vsi->txq_map[i], pf->avail_txqs);
- vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
- }
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
- vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
- }
-
- mutex_unlock(&pf->avail_q_mutex);
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_q_vector *q_vector;
- struct ice_ring *ring;
+ int v_idx;
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
+ if (!vsi->netdev)
return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&vsi->back->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
- ice_free_q_vector(vsi, v_idx);
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll, NAPI_POLL_WEIGHT);
}
/**
- * ice_cfg_netdev - Setup the netdev flags
- * @vsi: the VSI being configured
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
* Returns 0 on success, negative value on failure
*/
@@ -2307,6 +1511,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
+ int err;
netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
vsi->alloc_txq, vsi->alloc_rxq);
@@ -2364,195 +1569,14 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
- return 0;
-}
-
-/**
- * ice_vsi_free_arrays - clean up vsi resources
- * @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
- */
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
-{
- struct ice_pf *pf = vsi->back;
-
- /* free the ring and vector containers */
- if (free_qvectors && vsi->q_vectors) {
- devm_kfree(&pf->pdev->dev, vsi->q_vectors);
- vsi->q_vectors = NULL;
- }
- if (vsi->tx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
- vsi->tx_rings = NULL;
- }
- if (vsi->rx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
- vsi->rx_rings = NULL;
- }
-}
-
-/**
- * ice_vsi_clear - clean up and deallocate the provided vsi
- * @vsi: pointer to VSI being cleared
- *
- * This deallocates the vsi's queue resources, removes it from the PF's
- * VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_vsi_clear(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = NULL;
-
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
-
- pf = vsi->back;
-
- if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
- dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
- vsi->idx);
- return -EINVAL;
- }
-
- mutex_lock(&pf->sw_mutex);
- /* updates the PF for this cleared vsi */
-
- pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi)
- pf->next_vsi = vsi->idx;
-
- ice_vsi_free_arrays(vsi, true);
- mutex_unlock(&pf->sw_mutex);
- devm_kfree(&pf->pdev->dev, vsi);
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the vsi struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
- /* tie q_vector and vsi together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- num_q_vectors = vsi->num_q_vectors;
- } else {
- err = -EINVAL;
- goto err_out;
- }
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
-}
-
-/**
- * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
- * @vsi: ptr to the VSI
- *
- * This should only be called after ice_vsi_alloc() which allocates the
- * corresponding SW VSI structure and initializes num_queue_pairs for the
- * newly allocated VSI.
- *
- * Returns 0 on success or negative on failure
- */
-static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int num_q_vectors = 0;
-
- if (vsi->base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
- vsi->vsi_num, vsi->base_vector);
- return -EEXIST;
- }
-
- if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return -ENOENT;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- num_q_vectors = vsi->num_q_vectors;
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- break;
- }
+ err = register_netdev(vsi->netdev);
+ if (err)
+ return err;
- if (num_q_vectors)
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
- num_q_vectors, vsi->idx);
+ netif_carrier_off(vsi->netdev);
- if (vsi->base_vector < 0) {
- dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
- return -ENOENT;
- }
+ /* make sure transmit queues start off as stopped */
+ netif_tx_stop_all_queues(vsi->netdev);
return 0;
}
@@ -2572,327 +1596,17 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
}
/**
- * ice_vsi_cfg_rss - Configure RSS params for a VSI
- * @vsi: VSI to be configured
- */
-static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
-{
- u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
- struct ice_aqc_get_set_rss_keys *key;
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
- u8 *lut;
-
- vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
-
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
- else
- ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
-
- status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
- lut, vsi->rss_table_size);
-
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "set_rss_lut failed, error %d\n", status);
- err = -EIO;
- goto ice_vsi_cfg_rss_exit;
- }
-
- key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
- if (!key) {
- err = -ENOMEM;
- goto ice_vsi_cfg_rss_exit;
- }
-
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- else
- netdev_rss_key_fill((void *)seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- memcpy(&key->standard_rss_key, seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
-
- status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
-
- if (status) {
- dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
- status);
- err = -EIO;
- }
-
- devm_kfree(&pf->pdev->dev, key);
-ice_vsi_cfg_rss_exit:
- devm_kfree(&pf->pdev->dev, lut);
- return err;
-}
-
-/**
- * ice_vsi_reinit_setup - return and reallocate resources for a VSI
- * @vsi: pointer to the ice_vsi
- *
- * This reallocates the VSI's queue resources
- *
- * Returns 0 on success and negative value on failure
- */
-static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
-{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int ret, i;
-
- if (!vsi)
- return -EINVAL;
-
- ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- vsi->base_vector = 0;
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi, false);
- ice_vsi_set_num_qs(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_add(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ret = ice_vsi_alloc_arrays(vsi, false);
- if (ret < 0)
- goto err_vsi;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- if (!vsi->netdev) {
- ret = ice_cfg_netdev(vsi);
- if (ret)
- goto err_rings;
-
- ret = register_netdev(vsi->netdev);
- if (ret)
- goto err_rings;
-
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_all_queues(vsi->netdev);
- }
-
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
- break;
- default:
- break;
- }
-
- ice_vsi_set_tc_cfg(vsi);
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
-
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Failed VSI lan queue config\n");
- goto err_vectors;
- }
- return 0;
-
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, vsi->back->state);
- return ret;
-}
-
-/**
- * ice_vsi_setup - Set up a VSI by a given type
+ * ice_pf_vsi_setup - Set up a PF VSI
* @pf: board private structure
- * @type: VSI type
* @pi: pointer to the port_info instance
*
- * This allocates the sw VSI structure and its queue resources.
- *
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, otherwise returns NULL on failure.
+ * Returns pointer to the successfully allocated VSI sw struct on success,
+ * otherwise returns NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
- struct ice_port_info *pi)
-{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct device *dev = &pf->pdev->dev;
- struct ice_vsi_ctx ctxt = { 0 };
- struct ice_vsi *vsi;
- int ret, i;
-
- vsi = ice_vsi_alloc(pf, type);
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
- }
-
- vsi->port_info = pi;
- vsi->vsw = pf->first_sw;
-
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto err_get_qs;
- }
-
- /* set RSS capabilities */
- ice_vsi_set_rss_params(vsi);
-
- /* create the VSI */
- ret = ice_vsi_add(vsi);
- if (ret)
- goto err_vsi;
-
- ctxt.vsi_num = vsi->vsi_num;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- ret = ice_cfg_netdev(vsi);
- if (ret)
- goto err_cfg_netdev;
-
- ret = register_netdev(vsi->netdev);
- if (ret)
- goto err_register_netdev;
-
- netif_carrier_off(vsi->netdev);
-
- /* make sure transmit queues start off as stopped */
- netif_tx_stop_all_queues(vsi->netdev);
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_msix;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_rings;
-
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss(vsi);
- break;
- default:
- /* if vsi type is not recognized, clean up the resources and
- * exit
- */
- goto err_rings;
- }
-
- ice_vsi_set_tc_cfg(vsi);
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
-
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
- if (ret) {
- dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
- goto err_rings;
- }
-
- return vsi;
-
-err_rings:
- ice_vsi_free_q_vectors(vsi);
-err_msix:
- if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(vsi->netdev);
-err_register_netdev:
- if (vsi->netdev) {
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_cfg_netdev:
- ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
- if (ret)
- dev_err(&vsi->back->pdev->dev,
- "Free VSI AQ call failed, err %d\n", ret);
-err_vsi:
- ice_vsi_put_qs(vsi);
-err_get_qs:
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
- ice_vsi_clear(vsi);
-
- return NULL;
-}
-
-/**
- * ice_vsi_add_vlan - Add vsi membership for given vlan
- * @vsi: the vsi being configured
- * @vid: vlan id to be added
- */
-static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
- int err = 0;
-
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- tmp->fltr_info.l_data.vlan.vlan_id = vid;
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, &tmp_add_list);
-
- status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (status) {
- err = -ENODEV;
- dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
- vid, vsi->vsi_num);
- }
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
- return err;
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}
/**
@@ -2908,7 +1622,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret = 0;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -2919,6 +1633,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
if (vsi->info.pvid)
return -EINVAL;
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid)) {
+ ret = ice_cfg_vlan_pruning(vsi, true);
+ if (ret)
+ return ret;
+ }
+
/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
@@ -2932,38 +1653,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
}
/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN id to be removed
- */
-static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_fltr_list_entry *list;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
-
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.l_data.vlan.vlan_id = vid;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src = vsi->vsi_num;
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (ice_remove_vlan(&pf->hw, &tmp_add_list))
- dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
- vid, vsi->vsi_num);
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-}
-
-/**
* ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
@@ -2976,19 +1665,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int status;
if (vsi->info.pvid)
return -EINVAL;
- /* return code is ignored as there is nothing a user
- * can do about failure to remove and a log message was
- * already printed from the other function
+ /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ * information
*/
- ice_vsi_kill_vlan(vsi, vid);
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status)
+ return status;
clear_bit(vid, vsi->active_vlans);
- return 0;
+ /* Disable VLAN pruning when VLAN 0 is removed */
+ if (unlikely(!vid))
+ status = ice_cfg_vlan_pruning(vsi, false);
+
+ return status;
}
/**
@@ -3004,59 +1699,73 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
struct ice_vsi *vsi;
int status = 0;
- if (!ice_is_reset_recovery_pending(pf->state)) {
- vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
- if (!vsi) {
- status = -ENOMEM;
- goto error_exit;
- }
- } else {
- vsi = pf->vsi[0];
- status = ice_vsi_reinit_setup(vsi);
- if (status < 0)
- return -EIO;
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ status = -ENOMEM;
+ goto unroll_vsi_setup;
+ }
+
+ status = ice_cfg_netdev(vsi);
+ if (status) {
+ status = -ENODEV;
+ goto unroll_vsi_setup;
}
- /* tmp_add_list contains a list of MAC addresses for which MAC
- * filters need to be programmed. Add the VSI's unicast MAC to
- * this list
+ /* registering the NAPI handler requires both the queues and
+ * netdev to be created, which are done in ice_pf_vsi_setup()
+ * and ice_cfg_netdev() respectively
*/
+ ice_napi_add(vsi);
+
+ /* To add a MAC filter, first add the MAC to a list and then
+ * pass the list to ice_add_mac.
+ */
+
+ /* Add a unicast MAC filter so the VSI can get its packets */
status = ice_add_mac_to_list(vsi, &tmp_add_list,
vsi->port_info->mac.perm_addr);
if (status)
- goto error_exit;
+ goto unroll_napi_add;
/* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list.
+ * MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
if (status)
- goto error_exit;
+ goto free_mac_list;
/* program MAC filters for entries in tmp_add_list */
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status) {
dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
status = -ENOMEM;
- goto error_exit;
+ goto free_mac_list;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
-error_exit:
+free_mac_list:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+unroll_napi_add:
if (vsi) {
- ice_vsi_free_q_vectors(vsi);
- if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(vsi->netdev);
+ ice_napi_del(vsi);
if (vsi->netdev) {
+ if (vsi->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
+ }
+unroll_vsi_setup:
+ if (vsi) {
+ ice_vsi_free_q_vectors(vsi);
ice_vsi_delete(vsi);
ice_vsi_put_qs(vsi);
pf->q_left_tx += vsi->alloc_txq;
@@ -3097,10 +1806,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
*/
static void ice_deinit_pf(struct ice_pf *pf)
{
- if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
- if (pf->serv_task.func)
- cancel_work_sync(&pf->serv_task);
+ ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
}
@@ -3113,6 +1819,15 @@ static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
+ struct ice_hw *hw = &pf->hw;
+
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+ pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+ ICE_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
@@ -3155,6 +1870,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* reserve vectors for LAN traffic */
pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
v_budget += pf->num_lan_msix;
+ v_left -= pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -3182,10 +1898,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
"not enough vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
if (v_actual >= (pf->num_lan_msix + 1)) {
- pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
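+ /* one vector is reserved for the misc/OICR interrupt */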
+ pf->num_avail_sw_msix = v_actual -
+ (pf->num_lan_msix + 1);
} else if (v_actual >= 2) {
pf->num_lan_msix = 1;
- pf->num_avail_msix = v_actual - 2;
+ pf->num_avail_sw_msix = v_actual - 2;
} else {
pci_disable_msix(pf->pdev);
err = -ERANGE;
@@ -3218,12 +1935,32 @@ static void ice_dis_msix(struct ice_pf *pf)
}
/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_dis_msix(pf);
+
+ if (pf->sw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
+ pf->sw_irq_tracker = NULL;
+ }
+
+ if (pf->hw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
+ pf->hw_irq_tracker = NULL;
+ }
+}
+
+/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
- int vectors = 0;
+ int vectors = 0, hw_vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
@@ -3237,30 +1974,31 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
- pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
- if (!pf->irq_tracker) {
+ pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->sw_irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
- pf->irq_tracker->num_entries = vectors;
+ /* populate the SW interrupt pool with the number of OS-granted IRQs */
+ pf->num_avail_sw_msix = vectors;
+ pf->sw_irq_tracker->num_entries = vectors;
- return 0;
-}
+ /* set up HW vector assignment tracking */
+ hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
-/**
- * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
- * @pf: board private structure
- */
-static void ice_clear_interrupt_scheme(struct ice_pf *pf)
-{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
-
- if (pf->irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
- pf->irq_tracker = NULL;
+ pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->hw_irq_tracker) {
+ ice_clear_interrupt_scheme(pf);
+ return -ENOMEM;
}
+
+ /* populate the HW interrupt pool with the number of HW-supported IRQs */
+ pf->num_avail_hw_msix = hw_vectors;
+ pf->hw_irq_tracker->num_entries = hw_vectors;
+
+ return 0;
}
/**
@@ -3307,6 +2045,8 @@ static int ice_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
set_bit(__ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(__ICE_SERVICE_DIS, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
@@ -3364,6 +2104,9 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_interrupt_unroll;
}
+ /* Driver is mostly up */
+ clear_bit(__ICE_DOWN, pf->state);
+
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3386,7 +2129,11 @@ static int ice_probe(struct pci_dev *pdev,
goto err_msix_misc_unroll;
}
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
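+ /* pick the default bridge mode based on what the device reports */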
+ if (hw->evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+
pf->first_sw->pf = pf;
/* record the sw_id available for later use */
@@ -3399,21 +2146,15 @@ static int ice_probe(struct pci_dev *pdev,
goto err_alloc_sw_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- err = ice_init_link_events(pf->hw.port_info);
- if (err) {
- dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
- }
-
return 0;
err_alloc_sw_unroll:
+ set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
@@ -3436,25 +2177,23 @@ err_exit_unroll:
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- int i = 0;
- int err;
+ int i;
if (!pf)
return;
set_bit(__ICE_DOWN, pf->state);
+ ice_service_task_stop(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ ice_free_vfs(pf);
+ ice_vsi_release_all(pf);
+ ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
-
- err = ice_vsi_release(pf->vsi[i]);
- if (err)
- dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
- i, err);
+ ice_vsi_free_q_vectors(pf->vsi[i]);
}
-
- ice_free_irq_msix_misc(pf);
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
@@ -3470,11 +2209,9 @@ static void ice_remove(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3485,6 +2222,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+ .sriov_configure = ice_sriov_configure,
};
/**
@@ -3500,7 +2238,7 @@ static int __init ice_module_init(void)
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -3562,7 +2300,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
}
if (test_bit(__ICE_DOWN, pf->state) ||
- ice_is_reset_recovery_pending(pf->state)) {
+ ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't set mac %pM. device not ready\n",
mac);
return -EBUSY;
@@ -3722,78 +2460,6 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the vsi being changed
- */
-static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct device *dev = &vsi->back->pdev->dev;
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
- enum ice_status status;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
-
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
- if (status) {
- dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
- return -EIO;
- }
-
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the vsi being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct device *dev = &vsi->back->pdev->dev;
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
- enum ice_status status;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena) {
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- } else {
- /* Disable stripping. Leave tag in packet */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- }
-
- /* Allow all packets untagged/tagged */
- ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
-
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
- if (status) {
- dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
- ena, status, hw->adminq.sq_last_status);
- return -EIO;
- }
-
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
-}
-
-/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -3805,6 +2471,12 @@ static int ice_set_features(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ ret = ice_vsi_manage_rss_lut(vsi, true);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ ret = ice_vsi_manage_rss_lut(vsi, false);
+
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, true);
@@ -3863,248 +2535,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi)
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
- */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
- default:
- return;
- }
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = vsi->vsi_num;
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
-{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- u16 buf_len, i, pf_q;
- int err = 0, tc = 0;
- u8 num_q_grps;
-
- buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
- qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
- qg_buf->num_txqs = 1;
- num_q_grps = 1;
-
- /* set up and configure the tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
-
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
-
- /* Add Tx Queue TEID into the VSI tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
- }
-err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
- return err;
-}
-
-/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
- */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is RX queue number in global space of 2K rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile id;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- int err = 0;
- u16 i;
-
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
- /* set up individual rings */
- for (i = 0; i < vsi->num_rxq && !err; i++)
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
-
- if (err) {
- dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
- return -EIO;
- }
- return err;
-}
-
-/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
@@ -4129,200 +2559,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
}
/**
- * ice_vsi_stop_tx_rings - Disable Tx rings
- * @vsi: the VSI being configured
- */
-static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 *q_teids, val;
- u16 *q_ids, i;
- int err = 0;
-
- if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
- return -EINVAL;
-
- q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
- GFP_KERNEL);
- if (!q_teids)
- return -ENOMEM;
-
- q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
- GFP_KERNEL);
- if (!q_ids) {
- err = -ENOMEM;
- goto err_alloc_q_ids;
- }
-
- /* set up the tx queue list to be disabled */
- ice_for_each_txq(vsi, i) {
- u16 v_idx;
-
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
- err = -EINVAL;
- goto err_out;
- }
-
- q_ids[i] = vsi->txq_map[i];
- q_teids[i] = vsi->tx_rings[i]->txq_teid;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector associated to
- * the queue to schedule napi handler
- */
- v_idx = vsi->tx_rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
- }
- status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
- }
-
-err_out:
- devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
- devm_kfree(&pf->pdev->dev, q_teids);
-
- return err;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
-
- usleep_range(10, 20);
- }
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
- * @vsi: the VSI being configured
- * @ena: start or stop the rx rings
- */
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int i, j, ret = 0;
-
- for (i = 0; i < vsi->num_rxq; i++) {
- int pf_q = vsi->rxq_map[i];
- u32 rx_reg;
-
- for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
- if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
- ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- continue;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * ice_vsi_start_rx_rings - start VSI's rx rings
- * @vsi: the VSI whose rings are to be started
- *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
-{
- return ice_vsi_ctrl_rx_rings(vsi, true);
-}
-
-/**
- * ice_vsi_stop_rx_rings - stop VSI's rx rings
- * @vsi: the VSI
- *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
-{
- return ice_vsi_ctrl_rx_rings(vsi, false);
-}
-
-/**
- * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
- * @vsi: the VSI
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
-{
- int err_tx, err_rx;
-
- err_tx = ice_vsi_stop_tx_rings(vsi);
- if (err_tx)
- dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
-
- err_rx = ice_vsi_stop_rx_rings(vsi);
- if (err_rx)
- dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
-
- if (err_tx || err_rx)
- return -EIO;
-
- return 0;
-}
-
-/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
*/
@@ -4419,122 +2655,6 @@ static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
}
/**
- * ice_stat_update40 - read 40 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat,
- u64 *cur_stat)
-{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
-
- /* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
- */
- if (!prev_stat_loaded)
- *prev_stat = new_data;
- if (likely(new_data >= *prev_stat))
- *cur_stat = new_data - *prev_stat;
- else
- /* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
-}
-
-/**
- * ice_stat_update32 - read 32 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @reg: HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
-{
- u32 new_data;
-
- new_data = rd32(hw, reg);
-
- /* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
- */
- if (!prev_stat_loaded)
- *prev_stat = new_data;
- if (likely(new_data >= *prev_stat))
- *cur_stat = new_data - *prev_stat;
- else
- /* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
-}
-
-/**
- * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
- * @vsi: the VSI to be updated
- */
-static void ice_update_eth_stats(struct ice_vsi *vsi)
-{
- struct ice_eth_stats *prev_es, *cur_es;
- struct ice_hw *hw = &vsi->back->hw;
- u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
-
- prev_es = &vsi->eth_stats_prev;
- cur_es = &vsi->eth_stats;
-
- ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_bytes,
- &cur_es->rx_bytes);
-
- ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_unicast,
- &cur_es->rx_unicast);
-
- ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_multicast,
- &cur_es->rx_multicast);
-
- ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
- &cur_es->rx_broadcast);
-
- ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
- &prev_es->rx_discards, &cur_es->rx_discards);
-
- ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_bytes,
- &cur_es->tx_bytes);
-
- ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_unicast,
- &cur_es->tx_unicast);
-
- ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_multicast,
- &cur_es->tx_multicast);
-
- ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
- &cur_es->tx_broadcast);
-
- ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
- &prev_es->tx_errors, &cur_es->tx_errors);
-
- vsi->stat_offsets_loaded = true;
-}
-
-/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
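The counter helpers removed above spell out the offset-and-rollover handling for the 40-bit hardware statistics registers: the first read becomes a baseline, and a later read that is smaller than the baseline is treated as a wrap of the 40-bit register rather than as a negative delta. A small self-contained sketch of that arithmetic, with illustrative values only:

#include <stdint.h>
#include <stdbool.h>

/* Mirror of the 40-bit handling shown above: keep the first reading as
 * an offset, and treat a smaller subsequent reading as a wrap of the
 * 40-bit register.
 */
static uint64_t stat_delta40(uint64_t new_data, bool prev_loaded,
			     uint64_t *prev_stat)
{
	uint64_t cur;

	if (!prev_loaded)
		*prev_stat = new_data;          /* first read becomes the offset */
	if (new_data >= *prev_stat)
		cur = new_data - *prev_stat;
	else
		cur = (new_data + (1ULL << 40)) - *prev_stat;   /* rolled over */
	return cur & 0xFFFFFFFFFFULL;           /* keep 40 bits */
}

/* Example: prev = 0xFFFFFFFFF0, new = 0x10 -> delta = 0x20 after the wrap. */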
@@ -4827,7 +2947,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, err;
+ int i, tx_err, rx_err;
/* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit
@@ -4838,7 +2958,18 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- err = ice_vsi_stop_tx_rx_rings(vsi);
+ tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop Tx rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+
+ rx_err = ice_vsi_stop_rx_rings(vsi);
+ if (rx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop Rx rings, VSI %d error %d\n",
+ vsi->vsi_num, rx_err);
+
ice_napi_disable_all(vsi);
ice_for_each_txq(vsi, i)
@@ -4847,10 +2978,14 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (err)
- netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ if (tx_err || rx_err) {
+ netdev_err(vsi->netdev,
+ "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
- return err;
+ return -EIO;
+ }
+
+ return 0;
}
/**
@@ -4870,6 +3005,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
+ vsi->tx_rings[i]->netdev = vsi->netdev;
err = ice_setup_tx_ring(vsi->tx_rings[i]);
if (err)
break;
@@ -4895,6 +3031,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
+ vsi->rx_rings[i]->netdev = vsi->netdev;
err = ice_setup_rx_ring(vsi->rx_rings[i]);
if (err)
break;
@@ -4922,38 +3059,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
}
/**
- * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (!vsi->tx_rings)
- return;
-
- ice_for_each_txq(vsi, i)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- ice_free_tx_ring(vsi->tx_rings[i]);
-}
-
-/**
- * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (!vsi->rx_rings)
- return;
-
- ice_for_each_rxq(vsi, i)
- if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
- ice_free_rx_ring(vsi->rx_rings[i]);
-}
-
-/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -5014,78 +3119,26 @@ err_setup_tx:
}
/**
- * ice_vsi_close - Shut down a VSI
- * @vsi: the VSI being shut down
+ * ice_vsi_release_all - Delete all VSIs
+ * @pf: PF from which all VSIs are being removed
*/
-static void ice_vsi_close(struct ice_vsi *vsi)
+static void ice_vsi_release_all(struct ice_pf *pf)
{
- if (!test_and_set_bit(__ICE_DOWN, vsi->state))
- ice_down(vsi);
-
- ice_vsi_free_irq(vsi);
- ice_vsi_free_tx_rings(vsi);
- ice_vsi_free_rx_rings(vsi);
-}
+ int err, i;
-/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
- * @vsi: the VSI being removed
- */
-static void ice_rss_clean(struct ice_vsi *vsi)
-{
- struct ice_pf *pf;
-
- pf = vsi->back;
-
- if (vsi->rss_hkey_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
- if (vsi->rss_lut_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
-}
-
-/**
- * ice_vsi_release - Delete a VSI and free its resources
- * @vsi: the VSI being removed
- *
- * Returns 0 on success or < 0 on error
- */
-static int ice_vsi_release(struct ice_vsi *vsi)
-{
- struct ice_pf *pf;
+ if (!pf->vsi)
+ return;
- if (!vsi->back)
- return -ENODEV;
- pf = vsi->back;
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
- if (vsi->netdev) {
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
+ err = ice_vsi_release(pf->vsi[i]);
+ if (err)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ i, err, pf->vsi[i]->vsi_num);
}
-
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_rss_clean(vsi);
-
- /* Disable VSI and free resources */
- ice_vsi_dis_irq(vsi);
- ice_vsi_close(vsi);
-
- /* reclaim interrupt vectors back to PF */
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_msix += vsi->num_q_vectors;
-
- ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
- ice_vsi_clear_rings(vsi);
-
- ice_vsi_put_qs(vsi);
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
-
- ice_vsi_clear(vsi);
-
- return 0;
}
/**
@@ -5099,28 +3152,37 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev) &&
- vsi->type == ICE_VSI_PF)
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-
- ice_vsi_close(vsi);
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
}
/**
* ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed

*/
-static void ice_ena_vsi(struct ice_vsi *vsi)
+static int ice_ena_vsi(struct ice_vsi *vsi)
{
- if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
- return;
+ int err = 0;
- if (vsi->netdev && netif_running(vsi->netdev))
- vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- else if (ice_vsi_open(vsi))
- /* this clears the DOWN bit */
- dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n",
- vsi->vsi_num, vsi->vsw->sw_id);
+ if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+ vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ err = ice_vsi_open(vsi);
+ }
+ }
+
+ return err;
}
/**
@@ -5140,13 +3202,89 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
*/
-static void ice_pf_ena_all_vsi(struct ice_pf *pf)
+static int ice_pf_ena_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_ena_vsi(pf->vsi[v]);
+ if (ice_ena_vsi(pf->vsi[v]))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild_all - rebuild all VSIs in pf
+ * @pf: the PF
+ */
+static int ice_vsi_rebuild_all(struct ice_pf *pf)
+{
+ int i;
+
+ /* loop through pf->vsi array and reinit the VSI if found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ int err;
+
+ if (!pf->vsi[i])
+ continue;
+
+ /* VF VSI rebuild isn't supported yet */
+ if (pf->vsi[i]->type == ICE_VSI_VF)
+ continue;
+
+ err = ice_vsi_rebuild(pf->vsi[i]);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d rebuild failed\n",
+ pf->vsi[i]->idx);
+ return err;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d rebuilt. vsi_num = 0x%x\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_replay_all - replay all VSIs configuration in the PF
+ * @pf: the PF
+ */
+static int ice_vsi_replay_all(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status ret;
+ int i;
+
+ /* loop through pf->vsi array and replay the VSI if found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+
+ ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d replay failed %d\n",
+ pf->vsi[i]->idx, ret);
+ return -EIO;
+ }
+
+ /* Re-map HW VSI number, using VSI handle that has been
+ * previously validated in ice_replay_vsi() call above
+ */
+ pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d filter replayed successfully - vsi_num %i\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ /* Clean up replay filter after successful re-configuration */
+ ice_replay_post(hw);
+ return 0;
}
/**
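The re-mapping done after ice_replay_vsi() reflects the split this series introduces between the software VSI handle (vsi->idx), which stays stable across a reset, and the absolute HW VSI number, which may change; the same handle-versus-number distinction is behind the vsi_num-to-idx changes in the RSS hunks further below. A minimal sketch of a handle-keyed lookup of that kind (structure and names are illustrative, not the driver's ice_vsi_ctx):

#define TOY_MAX_VSI 16

/* Hypothetical context table keyed by the software VSI handle; the HW
 * VSI number stored in it may change across a reset, so callers keep
 * the handle and re-read the number after replay, as done above.
 */
struct toy_vsi_ctx {
	unsigned short vsi_num;                /* absolute HW index */
	int in_use;
};

static struct toy_vsi_ctx vsi_tbl[TOY_MAX_VSI];

static int toy_get_hw_vsi_num(unsigned short vsi_handle,
			      unsigned short *vsi_num)
{
	if (vsi_handle >= TOY_MAX_VSI || !vsi_tbl[vsi_handle].in_use)
		return -1;                     /* invalid or unused handle */
	*vsi_num = vsi_tbl[vsi_handle].vsi_num;
	return 0;
}

int main(void)
{
	unsigned short num;

	vsi_tbl[3].in_use = 1;
	vsi_tbl[3].vsi_num = 42;               /* before reset */
	vsi_tbl[3].vsi_num = 7;                /* FW hands out a new number */
	return toy_get_hw_vsi_num(3, &num);    /* handle 3 still resolves */
}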
@@ -5168,13 +3306,13 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_init_all_ctrlq(hw);
if (ret) {
dev_err(dev, "control queues init failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ice_clear_pxe_mode(hw);
@@ -5182,14 +3320,34 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
- /* basic nic switch setup */
- err = ice_setup_pf_sw(pf);
+ err = ice_sched_init_port(hw->port_info);
+ if (err)
+ goto err_sched_init_port;
+
+ /* reset search_hint of irq_trackers to 0 since interrupts are
+ * reclaimed and could be allocated from the beginning during VSI rebuild
+ */
+ pf->sw_irq_tracker->search_hint = 0;
+ pf->hw_irq_tracker->search_hint = 0;
+
+ err = ice_vsi_rebuild_all(pf);
if (err) {
- dev_err(dev, "ice_setup_pf_sw failed\n");
- goto fail_reset;
+ dev_err(dev, "ice_vsi_rebuild_all failed\n");
+ goto err_vsi_rebuild;
+ }
+
+ err = ice_update_link_info(hw->port_info);
+ if (err)
+ dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+
+ /* Replay all VSI configurations, including filters, after reset */
+ if (ice_vsi_replay_all(pf)) {
+ dev_err(&pf->pdev->dev,
+ "error replaying VSI configurations with switch filter rules\n");
+ goto err_vsi_rebuild;
}
/* start misc vector */
@@ -5197,20 +3355,36 @@ static void ice_rebuild(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
- goto fail_reset;
+ goto err_vsi_rebuild;
}
}
/* restart the VSIs that were rebuilt and running before the reset */
- ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev, "error enabling VSIs\n");
+ /* no need to disable VSIs in the teardown path of ice_rebuild()
+ * since it's already taken care of in ice_vsi_open()
+ */
+ goto err_vsi_rebuild;
+ }
+ ice_reset_all_vfs(pf, true);
+ /* if we get here, reset flow is successful */
+ clear_bit(__ICE_RESET_FAILED, pf->state);
return;
-fail_reset:
+err_vsi_rebuild:
+ ice_vsi_release_all(pf);
+err_sched_init_port:
+ ice_sched_cleanup_all(hw);
+err_init_ctrlq:
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ /* set this bit in PF state to control service task scheduling */
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
/**
@@ -5243,7 +3417,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/* if a reset is in progress, wait for some time for it to complete */
do {
- if (ice_is_reset_recovery_pending(pf->state)) {
+ if (ice_is_reset_in_progress(pf->state)) {
count++;
usleep_range(1000, 2000);
} else {
@@ -5299,7 +3473,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
@@ -5310,8 +3484,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %d\n",
@@ -5342,7 +3516,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS key, err %d aq_err %d\n",
@@ -5352,8 +3526,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS lut, err %d aq_err %d\n",
@@ -5366,6 +3540,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
/**
+ * ice_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq
+ * @dev: the netdev being configured
+ * @filter_mask: filter mask passed in
+ * @nlflags: netlink flags passed in
+ *
+ * Return the bridge mode (VEB/VEPA)
+ */
+static int
+ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask, int nlflags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 bmode;
+
+ bmode = pf->first_sw->bridge_mode;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
+ filter_mask, NULL);
+}
+
+/**
+ * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
+ * @vsi: Pointer to VSI structure
+ * @bmode: Hardware bridge mode (VEB/VEPA)
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_aqc_vsi_props *vsi_props;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ vsi_props = &vsi->info;
+ ctxt.info = vsi->info;
+
+ if (bmode == BRIDGE_MODE_VEB)
+ /* change from VEPA to VEB mode */
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ else
+ /* change from VEB to VEPA mode */
+ ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ bmode, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+ /* Update sw flags for bookkeeping */
+ vsi_props->sw_flags = ctxt.info.sw_flags;
+
+ return 0;
+}
+
+/**
+ * ice_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ * @flags: bridge setlink flags
+ *
+ * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
+ * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
+ * not already set) for all VSIs connected to this switch, and also updates the
+ * unicast switch filter rules for the corresponding switch of the netdev.
+ */
+static int
+ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_pf *pf = np->vsi->back;
+ struct nlattr *attr, *br_spec;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct ice_sw *pf_sw;
+ int rem, v, err = 0;
+
+ pf_sw = pf->first_sw;
+ /* find the attribute in the netlink message */
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+ /* Continue if bridge mode is not being flipped */
+ if (mode == pf_sw->bridge_mode)
+ continue;
+ /* Iterate through the PF VSI list and update the loopback
+ * mode of the VSI
+ */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
+ if (err)
+ return err;
+ }
+
+ hw->evb_veb = (mode == BRIDGE_MODE_VEB);
+ /* Update the unicast switch filter rules for the corresponding
+ * switch of the netdev
+ */
+ status = ice_update_sw_rule_bridge_mode(hw);
+ if (status) {
+ netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n",
+ mode, status, hw->adminq.sq_last_status);
+ /* revert hw->evb_veb */
+ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
+ return -EIO;
+ }
+
+ pf_sw->bridge_mode = mode;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void ice_tx_timeout(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *tx_ring = NULL;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 head, val = 0, i;
+ int hung_queue = -1;
+
+ pf->tx_timeout_count++;
+
+ /* find the stopped queue the same way the stack does */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(netdev, i);
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
+ hung_queue = i;
+ break;
+ }
+ }
+
+ if (i == netdev->num_tx_queues) {
+ netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+ } else {
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_txq; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (hung_queue ==
+ vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+ }
+
+ /* Reset recovery level if enough time has elapsed after last timeout.
+ * Also ensure no new reset action happens before next timeout period.
+ */
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
+ pf->tx_timeout_recovery_level = 1;
+ else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
+ netdev->watchdog_timeo)))
+ return;
+
+ if (tx_ring) {
+ head = tx_ring->next_to_clean;
+ /* Read interrupt register */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ val = rd32(&pf->hw,
+ GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->hw_base_vector));
+
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+ pf->tx_timeout_recovery_level, hung_queue);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 1:
+ set_bit(__ICE_PFR_REQ, pf->state);
+ break;
+ case 2:
+ set_bit(__ICE_CORER_REQ, pf->state);
+ break;
+ case 3:
+ set_bit(__ICE_GLOBR_REQ, pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
+ set_bit(__ICE_DOWN, pf->state);
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+ break;
+ }
+
+ ice_service_task_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
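The recovery logic in ice_tx_timeout() above backs the escalation level off to 1 once 20 seconds have passed since the last recovery, suppresses a new reset that lands inside the watchdog window, and otherwise walks PFR, then CORER, then GLOBR before declaring the device unrecoverable. A rough standalone model of that decision, using seconds in place of jiffies (illustrative only, not the driver's code):

#include <stdio.h>

enum reset_req { RESET_NONE, RESET_PFR, RESET_CORER, RESET_GLOBR, RESET_GIVE_UP };

/* Illustrative model of the escalation above: back off to level 1 after
 * a 20 s quiet period, suppress a new reset inside the watchdog window,
 * otherwise escalate PFR -> CORER -> GLOBR, then give up.
 */
static enum reset_req pick_reset(unsigned long now, unsigned long last_recovery,
				 unsigned long watchdog, int *level)
{
	if (now > last_recovery + 20)
		*level = 1;
	else if (now < last_recovery + watchdog)
		return RESET_NONE;             /* too soon, let the last one finish */

	switch ((*level)++) {
	case 1: return RESET_PFR;
	case 2: return RESET_CORER;
	case 3: return RESET_GLOBR;
	default: return RESET_GIVE_UP;
	}
}

int main(void)
{
	int level = 1;

	/* Two hangs 30 s apart both stay at PFR; a third hang 6 s later
	 * escalates to CORER.
	 */
	printf("%d %d %d\n",
	       pick_reset(100, 0, 5, &level),
	       pick_reset(130, 100, 5, &level),
	       pick_reset(136, 130, 5, &level));
	return 0;
}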
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5383,6 +3783,11 @@ static int ice_open(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
int err;
+ if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
+ return -EIO;
+ }
+
netif_carrier_off(netdev);
err = ice_vsi_open(vsi);
@@ -5473,9 +3878,18 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
+ .ndo_set_vf_mac = ice_set_vf_mac,
+ .ndo_get_vf_config = ice_get_vf_cfg,
+ .ndo_set_vf_trust = ice_set_vf_trust,
+ .ndo_set_vf_vlan = ice_set_vf_port_vlan,
+ .ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
+ .ndo_bridge_getlink = ice_bridge_getlink,
+ .ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+ .ndo_tx_timeout = ice_tx_timeout,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 295a8cd87fc1..3274c543283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
if (hw->nvm.blank_nvm_mode)
return 0;
- return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
+ return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eeae199469b6..7cc8aa18a22b 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi,
{
struct ice_sched_node *root;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[0].max_children);
- root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ /* coverity[suspicious_sizeof] */
+ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
@@ -86,6 +85,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_cfg_elem *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_update_elem;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+ cmd->num_elem_req = cpu_to_le16(elems_req);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && elems_ret)
+ *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+
+ return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from hw
+ * @hw: pointer to the hw struct
+ * @node_teid: node teid to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+static enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ memset(buf, 0, buf_size);
+ buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
+
+/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
* @layer: Scheduler layer of the node
@@ -98,9 +153,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_sched_node *parent;
+ struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
+ enum ice_status status;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -117,12 +173,20 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
+ /* query the current node information from FW before adding it
+ * to the SW DB
+ */
+ status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
+ if (status)
+ return status;
+
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[layer].max_children);
- if (max_children) {
- node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ if (hw->max_children[layer]) {
+ /* coverity[suspicious_sizeof] */
+ node->children = devm_kcalloc(ice_hw_to_dev(hw),
+ hw->max_children[layer],
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
@@ -134,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
- memcpy(&node->info, info, sizeof(*info));
+ memcpy(&node->info, &elem.generic[0], sizeof(node->info));
return 0;
}
@@ -192,14 +256,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
+
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
for (i = 0; i < num_nodes; i++)
buf->teid[i] = cpu_to_le32(node_teids[i]);
+
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
@@ -532,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
struct ice_sched_agg_info *agg_info;
- struct ice_sched_vsi_info *vsi_elem;
struct ice_sched_agg_info *atmp;
- struct ice_sched_vsi_info *tmp;
struct ice_hw *hw;
if (!pi)
@@ -553,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
}
}
- /* remove the vsi list */
- list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
- list_entry) {
- list_del(&vsi_elem->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_elem);
- }
-
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -592,13 +650,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)
*/
void ice_sched_cleanup_all(struct ice_hw *hw)
{
- if (!hw || !hw->port_info)
+ if (!hw)
return;
- if (hw->layer_info)
+ if (hw->layer_info) {
devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+ hw->layer_info = NULL;
+ }
- ice_sched_clear_port(hw->port_info);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
hw->num_tx_sched_layers = 0;
hw->num_tx_sched_phys_layers = 0;
@@ -607,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_create_vsi_info_entry - create an empty new VSI entry
- * @pi: port information structure
- * @vsi_id: VSI Id
- *
- * This function creates a new VSI entry and adds it to list
- */
-static struct ice_sched_vsi_info *
-ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *vsi_elem;
-
- if (!pi)
- return NULL;
-
- vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
- GFP_KERNEL);
- if (!vsi_elem)
- return NULL;
-
- list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
- vsi_elem->vsi_id = vsi_id;
- return vsi_elem;
-}
-
-/**
* ice_sched_add_elems - add nodes to hw and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
@@ -671,9 +707,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ICE_AQC_ELEM_VALID_EIR;
buf->generic[i].data.generic = 0;
buf->generic[i].data.cir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
buf->generic[i].data.eir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
}
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
@@ -697,7 +737,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
teid = le32_to_cpu(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
-
if (!new_node) {
ice_debug(hw, ICE_DBG_SCHED,
"Node is missing for teid =%d\n", teid);
@@ -710,7 +749,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
prev = ice_sched_get_first_node(hw, tc_node, layer);
-
if (prev && prev != new_node) {
while (prev->sibling)
prev = prev->sibling;
@@ -760,8 +798,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
return ICE_ERR_PARAM;
/* max children per node per layer */
- max_child_nodes =
- le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
+ max_child_nodes = hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children ? */
if ((parent->num_children + num_nodes) > max_child_nodes) {
@@ -851,78 +888,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
}
/**
- * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
- * @pi: pointer to the port info struct
- * @layer: layer number
- *
- * This function calculates the number of nodes present in the scheduler tree
- * including all the branches for a given layer
- */
-static u16
-ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
-{
- struct ice_hw *hw;
- u16 num_nodes = 0;
- u8 i;
-
- if (!pi)
- return num_nodes;
-
- hw = pi->hw;
-
- /* Calculate the number of nodes for all TCs */
- for (i = 0; i < pi->root->num_children; i++) {
- struct ice_sched_node *tc_node, *node;
-
- tc_node = pi->root->children[i];
-
- /* Get the first node */
- node = ice_sched_get_first_node(hw, tc_node, layer);
- if (!node)
- continue;
-
- /* count the siblings */
- while (node) {
- num_nodes++;
- node = node->sibling;
- }
- }
-
- return num_nodes;
-}
-
-/**
- * ice_sched_val_max_nodes - check max number of nodes reached or not
- * @pi: port information structure
- * @new_num_nodes_per_layer: pointer to the new number of nodes array
- *
- * This function checks whether the scheduler tree layers have enough space to
- * add new nodes
- */
-static enum ice_status
-ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
- u16 *new_num_nodes_per_layer)
-{
- struct ice_hw *hw = pi->hw;
- u8 i, qg_layer;
- u16 num_nodes;
-
- qg_layer = ice_sched_get_qgrp_layer(hw);
-
- /* walk through all the layers from SW entry point to qgroup layer */
- for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
- num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
- if (num_nodes + new_num_nodes_per_layer[i] >
- le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
- ice_debug(hw, ICE_DBG_SCHED,
- "max nodes reached for layer = %d\n", i);
- return ICE_ERR_CFG;
- }
- }
- return 0;
-}
-
-/**
* ice_rm_dflt_leaf_node - remove the default leaf node in the tree
* @pi: port information structure
*
@@ -1003,14 +968,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
- sizeof(*buf), GFP_KERNEL);
+ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Query default scheduling tree topology */
- status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
- sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
+ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
&num_branches, NULL);
if (status)
goto err_init_port;
@@ -1075,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
INIT_LIST_HEAD(&pi->agg_list);
- INIT_LIST_HEAD(&pi->vsi_info_list);
err_init_port:
if (status && pi->root) {
@@ -1097,6 +1059,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
+ __le16 max_sibl;
+ u8 i;
if (hw->layer_info)
return status;
@@ -1115,7 +1079,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
- hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+ /* max sibling group size of current layer refers to the max children
+ * of the below layer node.
+ * layer 1 node max children will be layer 2 max sibling group size
+ * layer 2 node max children will be layer 3 max sibling group size
+ * and so on. This array will be populated from root (index 0) to
+ * qgroup layer 7. Leaf node has no children.
+ */
+ for (i = 0; i < hw->num_tx_sched_layers; i++) {
+ max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ hw->max_children[i] = le16_to_cpu(max_sibl);
+ }
+
+ hw->layer_info = (struct ice_aqc_layer_props *)
+ devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
(hw->num_tx_sched_layers *
sizeof(*hw->layer_info)),
GFP_KERNEL);
@@ -1130,27 +1107,6 @@ sched_query_out:
}
/**
- * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
- * @pi: port information structure
- * @vsi_id: vsi id
- *
- * This function retrieves the vsi list for the given vsi id
- */
-static struct ice_sched_vsi_info *
-ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *list_elem;
-
- if (!pi)
- return NULL;
-
- list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
- if (list_elem->vsi_id == vsi_id)
- return list_elem;
- return NULL;
-}
-
-/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the hw struct
* @base: pointer to the base node
@@ -1186,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
/**
* ice_sched_get_free_qparent - Get a free lan or rdma q group node
* @pi: port information structure
- * @vsi_id: vsi id
+ * @vsi_handle: software VSI handle
* @tc: branch number
* @owner: lan or rdma
*
* This function retrieves a free lan or rdma q group node
*/
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
struct ice_sched_node *vsi_node, *qgrp_node = NULL;
- struct ice_sched_vsi_info *list_elem;
+ struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
- max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
-
- list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!list_elem)
- goto lan_q_exit;
-
- vsi_node = list_elem->vsi_node[tc];
+ max_children = pi->hw->max_children[qgrp_layer];
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return NULL;
+ vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI id */
if (!vsi_node)
goto lan_q_exit;
@@ -1233,14 +1187,14 @@ lan_q_exit:
* ice_sched_get_vsi_node - Get a VSI node based on VSI id
* @hw: pointer to the hw struct
* @tc_node: pointer to the TC node
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
*
* This function retrieves a VSI node for a given VSI id from a given
* TC branch
*/
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
- u16 vsi_id)
+ u16 vsi_handle)
{
struct ice_sched_node *node;
u8 vsi_layer;
@@ -1250,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/* Check whether it already exists */
while (node) {
- if (node->vsi_id == vsi_id)
+ if (node->vsi_handle == vsi_handle)
return node;
node = node->sibling;
}
@@ -1278,10 +1232,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/* calculate num nodes from q group to VSI layer */
for (i = qgl; i > vsil; i--) {
- u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
-
/* round to the next integer if there is a remainder */
- num = DIV_ROUND_UP(num, max_children);
+ num = DIV_ROUND_UP(num, hw->max_children[i]);
/* need at least one node */
num_nodes[i] = num ? num : 1;
@@ -1291,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/**
* ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
* @owner: node owner (lan or rdma)
@@ -1300,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* lan and rdma separately.
*/
static enum ice_status
-ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
@@ -1311,16 +1263,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
u16 num_added = 0;
u8 i, qgl, vsil;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
@@ -1398,8 +1347,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *node;
- u16 max_child;
- u8 i, vsil;
+ u8 vsil;
+ int i;
vsil = ice_sched_get_vsi_layer(hw);
for (i = vsil; i >= hw->sw_entry_point_layer; i--)
@@ -1412,12 +1361,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw, tc_node, i);
- max_child = le16_to_cpu(hw->layer_info[i].max_children);
-
+ node = ice_sched_get_first_node(hw, tc_node, (u8)i);
/* scan all the siblings */
while (node) {
- if (node->num_children < max_child)
+ if (node->num_children < hw->max_children[i])
break;
node = node->sibling;
}
@@ -1431,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/**
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1439,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
-ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
@@ -1451,10 +1398,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
if (!pi)
return ICE_ERR_PARAM;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
@@ -1477,21 +1420,22 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
return ICE_ERR_CFG;
if (i == vsil)
- parent->vsi_id = vsi_id;
+ parent->vsi_handle = vsi_handle;
}
+
return 0;
}
/**
* ice_sched_add_vsi_to_topo - add a new VSI into tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
*
* This function adds a new VSI into scheduler tree
*/
static enum ice_status
-ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *tc_node;
@@ -1505,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
/* add vsi supported nodes to tc subtree */
- return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
+ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ num_nodes);
}
/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @new_numqs: new number of max queues
* @owner: owner of this subtree
@@ -1519,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
* This function updates the VSI child nodes based on the number of queues
*/
static enum ice_status
-ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
- u16 new_numqs, u8 owner)
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ u8 tc, u16 new_numqs, u8 owner)
{
u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
@@ -1536,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
if (!tc_node)
return ICE_ERR_CFG;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_CFG;
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi->max_lanq[tc];
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
else
return ICE_ERR_PARAM;
@@ -1570,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
new_num_nodes[i] -= prev_num_nodes[i];
- status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
return status;
}
- vsi->max_lanq[tc] = new_numqs;
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
return status;
}
@@ -1584,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
/**
* ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
* @owner: lan or rdma
@@ -1595,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
* disabled then suspend the VSI if it is not already.
*/
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
-
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_NO_MEMORY;
-
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
/* suspend the VSI if tc is not enabled */
if (!enable) {
@@ -1630,18 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
/* TC is enabled, if it is a new VSI then add it to the tree */
if (!vsi_node) {
- status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
+ status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
if (status)
return status;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi->vsi_node[tc] = vsi_node;
+
+ vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
+ /* invalidate the max queues whenever the VSI is added to the
+ * scheduler tree for the first time (boot or after reset); the
+ * child nodes need to be recreated in these cases.
+ */
+ vsi_ctx->sched.max_lanq[tc] = 0;
}
/* update the VSI child nodes */
- status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
+ status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
+ owner);
if (status)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index badadcc120d3..5dc9cfa04c58 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,7 +12,6 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
- u16 vsi_id;
};
struct ice_sched_agg_info {
@@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
#endif /* _ICE_SCHED_H_ */
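With these changes the scheduler entry points are keyed by the software VSI handle rather than the hardware VSI number, and the per-layer fan-out comes from the cached hw->max_children[] array instead of re-reading layer_info. A minimal usage sketch, assuming a caller that already knows its software VSI handle and LAN queue count (the helper name below is hypothetical, not part of this patch):

/* Configure TC 0 LAN queues for a VSI, then look up a free queue-group node. */
static int ice_sched_cfg_vsi_sketch(struct ice_port_info *pi, u16 vsi_handle,
				    u16 num_lan_qs)
{
	enum ice_status status;

	status = ice_sched_cfg_vsi(pi, vsi_handle, 0 /* tc */, num_lan_qs,
				   ICE_SCHED_NODE_OWNER_LAN, true);
	if (status)
		return -EIO;

	/* a later Tx queue add picks a free queue-group parent under this VSI */
	if (!ice_sched_get_free_qparent(pi, vsi_handle, 0 /* tc */,
					ICE_SCHED_NODE_OWNER_LAN))
		return -ENOENT;

	return 0;
}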
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644
index 000000000000..027eba4e13f8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) over the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert a link speed supported by HW to a link speed supported by virtchnl.
+ * If adv_link_support is true, return the link speed in Mbps. Otherwise,
+ * return the link speed as a VIRTCHNL_LINK_SPEED_* value cast to a u32. Note
+ * that when adv_link_support is false the caller needs to cast the result
+ * back to an enum virtchnl_link_speed, whereas when adv_link_support is true
+ * the caller can expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported by
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, the reported speed is resolved to the closest known
+ * virtchnl speed.
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
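The conversion helper above has two output formats keyed off adv_link_support. A short sketch of how a PF might resolve the current link speed for a VF, assuming the hedged helper name below and that the speed is read from the port's cached link status in ice_type.h:

/* Sketch: resolve the current AQ link speed into the format the VF expects.
 * "adv" would typically reflect whether the VF negotiated an advanced
 * link-speed capability (an assumption of this example).
 */
static u32 ice_vf_link_speed_sketch(struct ice_hw *hw, bool adv)
{
	u16 aq_speed = hw->port_info->phy.link_info.link_speed;

	/* adv == true  -> speed in Mbps (e.g. 25000 for 25 Gb)
	 * adv == false -> closest VIRTCHNL_LINK_SPEED_* value cast to u32
	 */
	return ice_conv_link_speed_to_virtchnl(adv, aq_speed);
}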
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644
index 000000000000..3d78a0795138
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+#ifdef CONFIG_PCI_IOV
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#else /* CONFIG_PCI_IOV */
+static inline enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_SRIOV_H_ */
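The stubs above let PF code call these helpers unconditionally when CONFIG_PCI_IOV is disabled. When SR-IOV is enabled, a typical use of ice_aq_send_msg_to_vf() is pushing a VIRTCHNL_OP_EVENT link-change notification to a VF over the new mailbox queue. A minimal sketch, assuming the VF id and link arguments are supplied by the caller; struct virtchnl_pf_event comes from <linux/avf/virtchnl.h>:

/* Sketch only: notify one VF of a link change via the PF->VF mailbox. */
static void ice_vc_notify_link_sketch(struct ice_hw *hw, u16 vf_abs_id,
				      bool link_up, u16 aq_link_speed)
{
	struct virtchnl_pf_event pfe = { 0 };

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status = link_up;
	pfe.event_data.link_event.link_speed = (enum virtchnl_link_speed)
		ice_conv_link_speed_to_virtchnl(false, aq_link_speed);

	/* v_retval = 0 (success), no command details */
	ice_aq_send_msg_to_vf(hw, vf_abs_id, VIRTCHNL_OP_EVENT, 0,
			      (u8 *)&pfe, sizeof(pfe), NULL);
}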
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 9a95c4ffd7d7..f49f299ddf2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -6,6 +6,9 @@
/* Error Codes */
enum ice_status {
+ ICE_SUCCESS = 0,
+
+ /* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
@@ -20,6 +23,7 @@ enum ice_status {
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_MAX_LIMIT = -17,
+ ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6b7ec2ae5ad6..33403f39f1b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -86,6 +86,36 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
}
/**
+ * ice_init_def_sw_recp - initialize the recipe book keeping tables
+ * @hw: pointer to the hw struct
+ *
+ * Allocate memory for the entire recipe table and initialize the structures/
+ * entries corresponding to basic recipes.
+ */
+enum ice_status
+ice_init_def_sw_recp(struct ice_hw *hw)
+{
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
+ sizeof(struct ice_sw_recipe), GFP_KERNEL);
+ if (!recps)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ recps[i].root_rid = i;
+ INIT_LIST_HEAD(&recps[i].filt_rules);
+ INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ mutex_init(&recps[i].filt_rule_lock);
+ }
+
+ hw->switch_info->recp_list = recps;
+
+ return 0;
+}
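Every lookup type now owns its own rule list and mutex in recp_list[], replacing the per-type lists and locks that are removed further down in this file. A brief sketch of the access pattern consumers follow (the helper name is hypothetical):

/* Sketch: walk the MAC filter rules under the per-recipe lock. */
static void ice_walk_mac_rules_sketch(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct mutex *rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	struct list_head *rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	struct ice_fltr_mgmt_list_entry *itr;

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		/* inspect itr->fltr_info here */
	}
	mutex_unlock(rule_lock);
}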
+
+/**
* ice_aq_get_sw_cfg - get switch configuration
* @hw: pointer to the hardware structure
* @buf: pointer to the result buffer
@@ -140,23 +170,24 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-enum ice_status
+static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
cmd = &desc.params.vsi_cmd;
- res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ res = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
if (!vsi_ctx->alloc_from_pool)
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
ICE_AQ_VSI_IS_VALID);
+ cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
@@ -175,6 +206,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_aq_free_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware (0x0213)
+ */
+static enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+ if (keep_vsi_alloc)
+ cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
* ice_aq_update_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a VSI context struct
@@ -182,7 +249,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-enum ice_status
+static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
@@ -192,7 +259,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
enum ice_status status;
cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ resp = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
@@ -212,42 +279,162 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
- * ice_aq_free_vsi
+ * ice_is_vsi_valid - check whether the VSI is valid or not
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * check whether the VSI is valid or not
+ */
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+{
+ return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_get_hw_vsi_num - return the hw VSI number
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the hw VSI number
+ * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
+ */
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+{
+ return hw->vsi_ctx[vsi_handle]->vsi_num;
+}
+
+/**
+ * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the VSI context entry for a given VSI handle
+ */
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_save_vsi_ctx - save the VSI context for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ * @vsi: VSI context pointer
+ *
+ * save the VSI context entry for a given VSI handle
+ */
+static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_vsi_ctx *vsi)
+{
+ hw->vsi_ctx[vsi_handle] = vsi;
+}
+
+/**
+ * ice_clear_vsi_ctx - clear the VSI context entry
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * clear the VSI context entry
+ */
+static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (vsi) {
+ devm_kfree(ice_hw_to_dev(hw), vsi);
+ hw->vsi_ctx[vsi_handle] = NULL;
+ }
+}
+
+/**
+ * ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
- * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
*
- * Get VSI context info from hardware (0x0213)
+ * Add a VSI context to the hardware and also add it to the VSI handle list.
+ * If this function is called after reset for an existing VSI, update the
+ * corresponding VSI handle list entry with the new HW VSI number.
*/
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd)
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
{
- struct ice_aqc_add_update_free_vsi_resp *resp;
- struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct ice_vsi_ctx *tmp_vsi_ctx;
enum ice_status status;
- cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+ if (vsi_handle >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+ status = ice_aq_add_vsi(hw, vsi_ctx, cd);
+ if (status)
+ return status;
+ tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!tmp_vsi_ctx) {
+ /* Create a new vsi context */
+ tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*tmp_vsi_ctx), GFP_KERNEL);
+ if (!tmp_vsi_ctx) {
+ ice_aq_free_vsi(hw, vsi_ctx, false, cd);
+ return ICE_ERR_NO_MEMORY;
+ }
+ *tmp_vsi_ctx = *vsi_ctx;
+ ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
+ } else {
+ /* update with new HW VSI num */
+ if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ }
- cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- if (keep_vsi_alloc)
- cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+ return status;
+}
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (!status) {
- vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
- }
+/**
+ * ice_free_vsi - free VSI context from hardware and VSI handle list
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware as well as from VSI handle list
+ */
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
+ if (!status)
+ ice_clear_vsi_ctx(hw, vsi_handle);
return status;
}
/**
+ * ice_update_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware
+ */
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ return ice_aq_update_vsi(hw, vsi_ctx, cd);
+}
+
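Taken together, ice_add_vsi(), ice_get_hw_vsi_num() and ice_free_vsi() give the driver a stable software handle that survives the hardware VSI number changing across reset. A minimal usage sketch, assuming the caller picks the handle and owns the context (names below are placeholders):

/* Sketch: add a VSI under a software handle, use it, then free it. */
static enum ice_status
ice_vsi_handle_flow_sketch(struct ice_hw *hw, u16 vsi_handle,
			   struct ice_vsi_ctx *ctxt)
{
	enum ice_status status;

	status = ice_add_vsi(hw, vsi_handle, ctxt, NULL);
	if (status)
		return status;

	/* anything that needs the hardware VSI number translates the handle */
	if (ice_is_vsi_valid(hw, vsi_handle))
		ctxt->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* ... program queues, filters, etc. ... */

	return ice_free_vsi(hw, vsi_handle, ctxt, false, NULL);
}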
+/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the hw struct
* @vsi_list_id: VSI list id returned or used for lookup
@@ -464,10 +651,12 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
- u8 eth_hdr[DUMMY_ETH_HDR_LEN];
void *daddr = NULL;
+ u16 eth_hdr_sz;
+ u8 *eth_hdr;
u32 act = 0;
__be16 *off;
+ u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
@@ -477,13 +666,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
return;
}
+ eth_hdr_sz = sizeof(dummy_eth_header);
+ eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+
/* initialize the ether header with a dummy header */
- memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
+ memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
ice_fill_sw_info(hw, f_info);
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+ act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
ICE_SINGLE_ACT_VSI_ID_M;
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
@@ -503,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
case ICE_FWD_TO_QGRP:
+ q_rgn = f_info->qgrp_size > 0 ?
+ (u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
ICE_SINGLE_ACT_Q_REGION_M;
break;
- case ICE_DROP_PACKET:
- act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
- break;
default:
return;
}
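The queue-group action now carries both the base queue index and a region size expressed as a power of two (q_rgn = ilog2(qgrp_size)), while the drop action gains the valid bit. A short sketch of filling the queue-group fields, with example values assumed:

/* Example (values assumed): forward matches to 8 queues starting at queue 24.
 * qgrp_size must be a power of two; the rule's region field becomes
 * ilog2(8) == 3.
 */
static void ice_fill_qgrp_fwd_sketch(struct ice_fltr_info *f_info)
{
	f_info->fltr_act = ICE_FWD_TO_QGRP;
	f_info->fwd_id.q_id = 24;	/* base queue index */
	f_info->qgrp_size = 8;		/* covers queues 24..31 */
}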
@@ -536,7 +733,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fall-through */
case ICE_SW_LKUP_ETHERTYPE:
- off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
break;
case ICE_SW_LKUP_MAC_VLAN:
@@ -563,18 +760,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
if (daddr)
- ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);
+ ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
- off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
}
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));
-
- memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -601,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
enum ice_status status;
u16 lg_act_size;
u16 rules_size;
- u16 vsi_info;
u32 act;
+ u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
@@ -628,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
*/
- vsi_info = (m_ent->vsi_count > 1) ?
- m_ent->fltr_info.fwd_id.vsi_list_id :
- m_ent->fltr_info.fwd_id.vsi_id;
+ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
@@ -686,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/**
* ice_create_vsi_list_map
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
*
* Helper function to create a new entry of VSI list id to VSI mapping
* using the given VSI list id
*/
static struct ice_vsi_list_map_info *
-ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -706,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return NULL;
v_map->vsi_list_id = vsi_list_id;
-
+ v_map->ref_cnt = 1;
for (i = 0; i < num_vsi; i++)
- set_bit(vsi_array[i], v_map->vsi_map);
+ set_bit(vsi_handle_arr[i], v_map->vsi_map);
list_add(&v_map->list_entry, &sw->vsi_list_map_head);
return v_map;
@@ -717,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_update_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
@@ -728,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
* using the given VSI list id
*/
static enum ice_status
-ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
@@ -759,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
-
- for (i = 0; i < num_vsi; i++)
- s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
+ for (i = 0; i < num_vsi; i++) {
+ if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
+ status = ICE_ERR_PARAM;
+ goto exit;
+ }
+ /* AQ call requires hw_vsi_id(s) */
+ s_rule->pdata.vsi_list.vsi[i] =
+ cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
+ }
s_rule->type = cpu_to_le16(type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
@@ -769,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
+exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -776,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
* @hw: pointer to the hw struct
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: number of VSIs in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
static enum ice_status
-ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
enum ice_status status;
- int i;
-
- for (i = 0; i < num_vsi; i++)
- if (vsi_array[i] >= ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -798,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return status;
/* Update the newly created VSI list to include the specified VSIs */
- return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
- false, ice_aqc_opc_add_sw_rules,
- lkup_type);
+ return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
+ *vsi_list_id, false,
+ ice_aqc_opc_add_sw_rules, lkup_type);
}
/**
@@ -816,10 +1012,10 @@ static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
+ struct ice_sw_recipe *recp;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -860,31 +1056,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
* calls remove filter AQ command
*/
l_type = fm_entry->fltr_info.lkup_type;
- if (l_type == ICE_SW_LKUP_MAC) {
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
- } else if (l_type == ICE_SW_LKUP_VLAN) {
- mutex_lock(&sw->vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->vlan_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
- l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
- mutex_lock(&sw->eth_m_list_lock);
- list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
- mutex_unlock(&sw->eth_m_list_lock);
- } else if (l_type == ICE_SW_LKUP_PROMISC ||
- l_type == ICE_SW_LKUP_PROMISC_VLAN) {
- mutex_lock(&sw->promisc_list_lock);
- list_add(&fm_entry->list_entry, &sw->promisc_list_head);
- mutex_unlock(&sw->promisc_list_lock);
- } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
- mutex_lock(&sw->mac_vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
- mutex_unlock(&sw->mac_vlan_list_lock);
- } else {
- status = ICE_ERR_NOT_IMPL;
- }
+ recp = &hw->switch_info->recp_list[l_type];
+ list_add(&fm_entry->list_entry, &recp->filt_rules);
+
ice_create_pkt_fwd_rule_exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
@@ -893,19 +1067,15 @@ ice_create_pkt_fwd_rule_exit:
/**
* ice_update_pkt_fwd_rule
* @hw: pointer to the hardware structure
- * @rule_id: rule of previously created switch rule to update
- * @vsi_list_id: VSI list id to be updated with
- * @f_info: ice_fltr_info to pull other information for switch rule
+ * @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
* VSI list id
*/
static enum ice_status
-ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
- struct ice_fltr_info f_info)
+ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- struct ice_fltr_info tmp_fltr;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -913,14 +1083,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
if (!s_rule)
return ICE_ERR_NO_MEMORY;
- tmp_fltr = f_info;
- tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
- tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
@@ -931,7 +1096,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
}
/**
- * ice_handle_vsi_list_mgmt
+ * ice_update_sw_rule_bridge_mode
+ * @hw: pointer to the hw struct
+ *
+ * Updates unicast switch filter rules based on VEB/VEPA mode
+ */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ struct ice_fltr_info *fi = &fm_entry->fltr_info;
+ u8 *addr = fi->l_data.mac.mac_addr;
+
+ /* Update unicast Tx rules to reflect the selected
+ * VEB/VEPA mode
+ */
+ if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ status = ice_update_pkt_fwd_rule(hw, fi);
+ if (status)
+ break;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return status;
+}
+
+/**
+ * ice_add_update_vsi_list
* @hw: pointer to the hardware structure
* @m_entry: pointer to current filter management list entry
* @cur_fltr: filter information from the book keeping entry
@@ -952,10 +1158,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
* using the update switch rule command
*/
static enum ice_status
-ice_handle_vsi_list_mgmt(struct ice_hw *hw,
- struct ice_fltr_mgmt_list_entry *m_entry,
- struct ice_fltr_info *cur_fltr,
- struct ice_fltr_info *new_fltr)
+ice_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_fltr_mgmt_list_entry *m_entry,
+ struct ice_fltr_info *cur_fltr,
+ struct ice_fltr_info *new_fltr)
{
enum ice_status status = 0;
u16 vsi_list_id = 0;
@@ -975,34 +1181,36 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
* a part of a VSI list. So, create a VSI list with the old and
* new VSIs.
*/
- u16 vsi_id_arr[2];
- u16 fltr_rule;
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
+ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
return ICE_ERR_ALREADY_EXISTS;
- vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
- vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
+ vsi_handle_arr[0] = cur_fltr->vsi_handle;
+ vsi_handle_arr[1] = new_fltr->vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
&vsi_list_id,
new_fltr->lkup_type);
if (status)
return status;
- fltr_rule = cur_fltr->fltr_rule_id;
+ tmp_fltr = *new_fltr;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
/* Update the previous switch rule of "MAC forward to VSI" to
* "MAC fwd to VSI list"
*/
- status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
- *new_fltr);
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
return status;
cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
m_entry->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
/* If this entry was large action then the large action needs
@@ -1014,11 +1222,11 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
m_entry->sw_marker_id,
m_entry->lg_act_idx);
} else {
- u16 vsi_id = new_fltr->fwd_id.vsi_id;
+ u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
/* A rule already exists with the new VSI being added */
- if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
/* Update the previously created VSI list set with
@@ -1027,12 +1235,12 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- false, opcode,
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false, opcode,
new_fltr->lkup_type);
/* update VSI list mapping info with new VSI id */
if (!status)
- set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
if (!status)
m_entry->vsi_count++;
@@ -1040,54 +1248,313 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
}
/**
- * ice_find_mac_entry
+ * ice_find_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
- * @mac_addr: MAC address to search for
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
*
- * Helper function to search for a MAC entry using a given MAC address
- * Returns pointer to the entry if found.
+ * Helper function to search for a given rule entry
+ * Returns pointer to entry storing the rule if found
*/
static struct ice_fltr_mgmt_list_entry *
-ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
+ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
- struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
+ struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->mac_list_lock);
- list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
- u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(buf, mac_addr)) {
- mac_ret = m_list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->flag == list_itr->fltr_info.flag) {
+ ret = list_itr;
break;
}
}
- mutex_unlock(&sw->mac_list_lock);
- return mac_ret;
+ return ret;
+}
+
+/**
+ * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which VSI lists needs to be searched
+ * @vsi_handle: VSI handle to be found in VSI list
+ * @vsi_list_id: VSI list id found containing vsi_handle
+ *
+ * Helper function to search a VSI list with single entry containing given VSI
+ * handle element. This can be extended further to search VSI list with more
+ * than 1 vsi_count. Returns pointer to VSI list entry if found.
+ */
+static struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+ u16 *vsi_list_id)
+{
+ struct ice_vsi_list_map_info *map_info = NULL;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (test_bit(vsi_handle, map_info->vsi_map)) {
+ *vsi_list_id = map_info->vsi_list_id;
+ return map_info;
+ }
+ }
+ }
+ return NULL;
}
/**
- * ice_add_shared_mac - Add one MAC shared filter rule
+ * ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
+ * @recp_id: lookup type (recipe id) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
- * Adds or updates the book keeping list for the MAC addresses
+ * Adds or updates the rule lists for a given recipe
*/
static enum ice_status
-ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
{
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
- new_fltr = &f_entry->fltr_info;
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
- m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
- if (!m_entry)
+ mutex_lock(rule_lock);
+ new_fltr = &f_entry->fltr_info;
+ if (new_fltr->flag & ICE_FLTR_RX)
+ new_fltr->src = hw->port_info->lport;
+ else if (new_fltr->flag & ICE_FLTR_TX)
+ new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
+
+ m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
+ if (!m_entry) {
+ mutex_unlock(rule_lock);
return ice_create_pkt_fwd_rule(hw, f_entry);
+ }
cur_fltr = &m_entry->fltr_info;
+ status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
+ mutex_unlock(rule_lock);
- return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
+ return status;
+}
+
+/**
+ * ice_remove_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @lkup_type: switch rule filter lookup type
+ *
+ * The VSI list should be emptied before this function is called to remove the
+ * VSI list.
+ */
+static enum ice_status
+ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+
+ /* Free the vsi_list resource that we allocated. It is assumed that the
+ * list is empty at this point.
+ */
+ status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ ice_aqc_opc_free_res);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_fltr_mgmt_list_entry *fm_list)
+{
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status = 0;
+ u16 vsi_list_id;
+
+ if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = fm_list->fltr_info.lkup_type;
+ vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+
+ if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+ struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ tmp_fltr_info.vsi_handle = rem_vsi_handle;
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr_info.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+
+ fm_list->fltr_info = tmp_fltr_info;
+ }
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_remove_rule_internal - Remove a filter rule of a given type
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe id for which the rule needs to be removed
+ * @f_entry: rule entry containing filter information
+ */
+static enum ice_status
+ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_elem;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ u16 vsi_handle;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
+ if (!list_elem) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+
+ if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (!list_elem->vsi_list_info) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ } else {
+ if (list_elem->vsi_list_info->ref_cnt > 1)
+ list_elem->vsi_list_info->ref_cnt--;
+ vsi_handle = f_entry->fltr_info.vsi_handle;
+ status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status)
+ goto exit;
+ /* if vsi count goes to zero after updating the vsi list */
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+
+ if (remove_rule) {
+ /* Remove the lookup rule */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ GFP_KERNEL);
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
+ ice_aqc_opc_remove_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+
+ /* the rule buffer is no longer needed, regardless of status */
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ if (status)
+ goto exit;
+
+ /* Remove the book keeping entry from the list */
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ }
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
/**
@@ -1106,7 +1573,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
+ struct list_head *rule_head;
u16 elem_sent, total_elem_left;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
u16 s_rule_size;
@@ -1114,48 +1584,73 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (!m_list || !hw)
return ICE_ERR_PARAM;
+ s_rule = NULL;
+ sw = hw->switch_info;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
+ u16 hw_vsi_id;
- if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ vsi_handle = m_list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ /* update the src in case it is vsi num */
+ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
- if (is_zero_ether_addr(add))
+ m_list_itr->fltr_info.src = hw_vsi_id;
+ if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
+ is_zero_ether_addr(add))
return ICE_ERR_PARAM;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
- if (ice_find_mac_entry(hw, add))
+ mutex_lock(rule_lock);
+ if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &m_list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
return ICE_ERR_ALREADY_EXISTS;
+ }
+ mutex_unlock(rule_lock);
num_unicast++;
} else if (is_multicast_ether_addr(add) ||
(is_unicast_ether_addr(add) && hw->ucast_shared)) {
- status = ice_add_shared_mac(hw, m_list_itr);
- if (status) {
- m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ m_list_itr->status =
+ ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
}
+ mutex_lock(rule_lock);
/* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast)
- return 0;
+ if (!num_unicast) {
+ status = 0;
+ goto ice_add_mac_exit;
+ }
+
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Allocate switch rule buffer for the bulk update for unicast */
s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- if (is_unicast_ether_addr(addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
- r_iter, ice_aqc_opc_add_sw_rules);
+ if (is_unicast_ether_addr(mac_addr)) {
+ ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
+ ice_aqc_opc_add_sw_rules);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
@@ -1183,11 +1678,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_switch_info *sw = hw->switch_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
struct ice_fltr_mgmt_list_entry *fm_entry;
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(mac_addr)) {
f_info->fltr_rule_id =
le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
f_info->fltr_act = ICE_FWD_TO_VSI;
@@ -1203,46 +1697,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
/* The book keeping entries will get removed when
* base driver calls remove filter AQ command
*/
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
+ list_add(&fm_entry->list_entry, rule_head);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
}
ice_add_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
+ mutex_unlock(rule_lock);
+ if (s_rule)
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
/**
- * ice_find_vlan_entry
- * @hw: pointer to the hardware structure
- * @vlan_id: VLAN id to search for
- *
- * Helper function to search for a VLAN entry using a given VLAN id
- * Returns pointer to the entry if found.
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
-{
- struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
- struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->vlan_list_lock);
- list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
- if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
- vlan_ret = vlan_list_itr;
- break;
- }
-
- mutex_unlock(&sw->vlan_list_lock);
- return vlan_ret;
-}
-
-/**
* ice_add_vlan_internal - Add one VLAN based filter rule
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
@@ -1250,53 +1719,150 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
- struct ice_fltr_info *new_fltr, *cur_fltr;
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *v_list_itr;
- u16 vlan_id;
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ enum ice_sw_lkup_type lkup_type;
+ u16 vsi_list_id = 0, vsi_handle;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
+
/* VLAN id should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
- vlan_id = new_fltr->l_data.vlan.vlan_id;
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
+ if (new_fltr->src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+
+ new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
+ lkup_type = new_fltr->lkup_type;
+ vsi_handle = new_fltr->vsi_handle;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
if (!v_list_itr) {
- u16 vsi_id = ICE_VSI_INVAL_ID;
- enum ice_status status;
- u16 vsi_list_id = 0;
+ struct ice_vsi_list_map_info *map_info = NULL;
if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
- enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
-
- /* All VLAN pruning rules use a VSI list.
- * Convert the action to forwarding to a VSI list.
+ /* All VLAN pruning rules use a VSI list. Check if
+ * there is already a VSI list containing the VSI that we
+ * want to add. If found, use the same vsi_list_id for
+ * this new VLAN rule or else create a new list.
*/
- vsi_id = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
- &vsi_list_id,
- lkup_type);
- if (status)
- return status;
+ map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
+ vsi_handle,
+ &vsi_list_id);
+ if (!map_info) {
+ status = ice_create_vsi_list_rule(hw,
+ &vsi_handle,
+ 1,
+ &vsi_list_id,
+ lkup_type);
+ if (status)
+ goto exit;
+ }
+ /* Convert the action to forwarding to a VSI list. */
new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
new_fltr->fwd_id.vsi_list_id = vsi_list_id;
}
status = ice_create_pkt_fwd_rule(hw, f_entry);
- if (!status && vsi_id != ICE_VSI_INVAL_ID) {
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
- if (!v_list_itr)
- return ICE_ERR_DOES_NOT_EXIST;
- v_list_itr->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id, 1,
- vsi_list_id);
+ if (!status) {
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
+ new_fltr);
+ if (!v_list_itr) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+ /* reuse VSI list for new rule and increment ref_cnt */
+ if (map_info) {
+ v_list_itr->vsi_list_info = map_info;
+ map_info->ref_cnt++;
+ } else {
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle,
+ 1, vsi_list_id);
+ }
}
+ } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
+ /* Update the existing VSI list to add the new VSI id only if it is
+ * used by one VLAN rule.
+ */
+ cur_fltr = &v_list_itr->fltr_info;
+ status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
+ new_fltr);
+ } else {
+ /* The VLAN rule exists and the VSI list used by this rule is
+ * referenced by more than one VLAN rule. Create a new VSI list
+ * that appends the new VSI to the previous one and update the
+ * existing VLAN rule to point to the new VSI list id.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+ u16 cur_handle;
- return status;
+ /* The current implementation only supports reusing a VSI list
+ * that contains a single VSI. We should never hit the condition
+ * below.
+ */
+ if (v_list_itr->vsi_count > 1 &&
+ v_list_itr->vsi_list_info->ref_cnt > 1) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ status = ICE_ERR_CFG;
+ goto exit;
+ }
+
+ cur_handle =
+ find_first_bit(v_list_itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_handle == vsi_handle) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto exit;
+ }
+
+ vsi_handle_arr[0] = cur_handle;
+ vsi_handle_arr[1] = vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id, lkup_type);
+ if (status)
+ goto exit;
+
+ tmp_fltr = v_list_itr->fltr_info;
+ tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ /* Update the previous switch rule to use the new VSI list, which
+ * includes the VSI that is being requested.
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ goto exit;
+
+ /* Before overriding the VSI list map info, decrement the ref_cnt
+ * of the previous VSI list.
+ */
+ v_list_itr->vsi_list_info->ref_cnt--;
+
+ /* now update to newly created list */
+ v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ v_list_itr->vsi_count++;
}
- cur_fltr = &v_list_itr->fltr_info;
- return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
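
For context, a minimal caller sketch (a hypothetical helper, not part of this patch) showing how a single VLAN filter entry would be built with the vsi_handle/src_id fields that ice_add_vlan_internal() now requires, and then handed to ice_add_vlan(). ice_add_vlan() itself sets flag to ICE_FLTR_TX and validates the lookup type, so only the fields shown need to be filled in:

static int ice_vsi_add_one_vlan(struct ice_hw *hw, u16 vsi_handle, u16 vid)
{
	struct ice_fltr_list_entry *entry;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* only the fields checked by ice_add_vlan_internal() are set */
	entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
	entry->fltr_info.vsi_handle = vsi_handle;
	entry->fltr_info.l_data.vlan.vlan_id = vid;
	list_add(&entry->list_entry, &tmp_add_list);

	status = ice_add_vlan(hw, &tmp_add_list);

	/* the caller owns the list memory regardless of the outcome */
	list_del(&entry->list_entry);
	devm_kfree(ice_hw_to_dev(hw), entry);

	return status ? -EIO : 0;
}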
/**
@@ -1313,335 +1879,58 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
return ICE_ERR_PARAM;
list_for_each_entry(v_list_itr, v_list, list_entry) {
- enum ice_status status;
-
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
return ICE_ERR_PARAM;
-
- status = ice_add_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ v_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
return 0;
}
/**
- * ice_remove_vsi_list_rule
+ * ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
- * @lkup_type: switch rule filter lookup type
+ * @rule_head: pointer to the switch list structure that we want to delete
*/
-static enum ice_status
-ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
- enum ice_sw_lkup_type lkup_type)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
- u16 s_rule_size;
-
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
- /* FW expects number of VSIs in vsi_list resource to be 0 for clear
- * command. Since memory is zero'ed out during initialization, it's not
- * necessary to explicitly initialize the variable to 0.
- */
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (!status)
- /* Free the vsi_list resource that we allocated */
- status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
- ice_aqc_opc_free_res);
-
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_handle_rem_vsi_list_mgmt
- * @hw: pointer to the hardware structure
- * @vsi_id: ID of the VSI to remove
- * @fm_list_itr: filter management entry for which the VSI list management
- * needs to be done
- */
-static enum ice_status
-ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
- struct ice_fltr_mgmt_list_entry *fm_list_itr)
+static void
+ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
- struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
- enum ice_sw_lkup_type lkup_type;
- bool is_last_elem = true;
- bool conv_list = false;
- bool del_list = false;
- u16 vsi_list_id;
-
- lkup_type = fm_list_itr->fltr_info.lkup_type;
- vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
-
- if (fm_list_itr->vsi_count > 1) {
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- true,
- ice_aqc_opc_update_sw_rules,
- lkup_type);
- if (status)
- return status;
- fm_list_itr->vsi_count--;
- is_last_elem = false;
- clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
- }
-
- /* For non-VLAN rules that forward packets to a VSI list, convert them
- * to forwarding packets to a VSI if there is only one VSI left in the
- * list. Unused lists are then removed.
- * VLAN rules need to use VSI lists even with only one VSI.
- */
- if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
- if (lkup_type == ICE_SW_LKUP_VLAN) {
- del_list = is_last_elem;
- } else if (fm_list_itr->vsi_count == 1) {
- conv_list = true;
- del_list = true;
- }
- }
-
- if (del_list) {
- /* Remove the VSI list since it is no longer used */
- struct ice_vsi_list_map_info *vsi_list_info =
- fm_list_itr->vsi_list_info;
+ if (!list_empty(rule_head)) {
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct ice_fltr_mgmt_list_entry *tmp;
- status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
- if (status)
- return status;
-
- if (conv_list) {
- u16 rem_vsi_id;
-
- rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
- ICE_MAX_VSI);
-
- /* Error out when the expected last element is not in
- * the VSI list map
- */
- if (rem_vsi_id == ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
-
- /* Change the list entry action from VSI_LIST to VSI */
- fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+ list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
+ list_del(&entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), entry);
}
-
- list_del(&vsi_list_info->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
- fm_list_itr->vsi_list_info = NULL;
- }
-
- if (conv_list) {
- /* Convert the rule's forward action to forwarding packets to
- * a VSI
- */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- if (status)
- return status;
}
-
- if (is_last_elem) {
- /* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_remove_sw_rules);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (status)
- return status;
-
- /* Remove a book keeping entry from the MAC address list */
- mutex_lock(&sw->mac_list_lock);
- list_del(&fm_list_itr->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- }
- return status;
-}
-
-/**
- * ice_remove_mac_entry
- * @hw: pointer to the hardware structure
- * @f_entry: structure containing MAC forwarding information
- */
-static enum ice_status
-ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
-{
- struct ice_fltr_mgmt_list_entry *m_entry;
- u16 vsi_id;
- u8 *add;
-
- add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
-
- m_entry = ice_find_mac_entry(hw, add);
- if (!m_entry)
- return ICE_ERR_PARAM;
-
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
}
/**
- * ice_remove_mac - remove a MAC address based filter rule
+ * ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
- * @m_list: list of MAC addresses and forwarding information
- *
- * This function removes either a MAC filter rule or a specific VSI from a
- * VSI list for a multicast MAC address.
- *
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
- */
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
-{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
- u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *m_entry;
- struct ice_fltr_list_entry *m_list_itr;
- u16 elem_sent, total_elem_left;
- enum ice_status status = 0;
- u16 num_unicast = 0;
-
- if (!m_list)
- return ICE_ERR_PARAM;
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
- num_unicast++;
- else if (is_multicast_ether_addr(addr) ||
- (is_unicast_ether_addr(addr) && hw->ucast_shared))
- ice_remove_mac_entry(hw, m_list_itr);
- }
-
- /* Exit if no unicast addresses found. Multicast switch rules
- * were added individually
- */
- if (!num_unicast)
- return 0;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry) {
- status = ICE_ERR_DOES_NOT_EXIST;
- goto ice_remove_mac_exit;
- }
-
- ice_fill_sw_rule(hw, &m_entry->fltr_info,
- r_iter, ice_aqc_opc_remove_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
-
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_remove_sw_rules,
- NULL);
- if (status)
- break;
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry)
- return ICE_ERR_OUT_OF_RANGE;
- mutex_lock(&sw->mac_list_lock);
- list_del(&m_entry->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), m_entry);
- }
- }
-
-ice_remove_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
- * VSI for the switch (represented by swid)
- * @hw: pointer to the hardware structure
- * @vsi_id: number of VSI to set as default
+ * @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
+ *
+ * Add a filter rule to set/unset the given VSI as the default VSI for the
+ * switch (represented by swid).
*/
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
enum ice_status status;
u16 s_rule_size;
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
@@ -1654,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
f_info.lkup_type = ICE_SW_LKUP_DFLT;
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
- f_info.fwd_id.vsi_id = vsi_id;
+ f_info.fwd_id.hw_vsi_id = hw_vsi_id;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
+ f_info.src_id = ICE_SRC_ID_LPORT;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
- f_info.src = vsi_id;
+ f_info.src_id = ICE_SRC_ID_VSI;
+ f_info.src = hw_vsi_id;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_tx_vsi_rule_id;
@@ -1682,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = vsi_id;
+ hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
hw->port_info->dflt_tx_vsi_rule_id = index;
} else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = vsi_id;
+ hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
hw->port_info->dflt_rx_vsi_rule_id = index;
}
} else {
@@ -1704,26 +1995,38 @@ out:
}
/**
- * ice_remove_vlan_internal - Remove one VLAN based filter rule
+ * ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
- * @f_entry: filter entry containing one VLAN information
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * This function removes either a MAC filter rule or a specific VSI from a
+ * VSI list for a multicast MAC address.
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
+ * ice_add_mac. Caller should be aware that this call will only work if all
+ * the entries passed into m_list were added previously. It will not attempt to
+ * do a partial remove of entries that were found.
*/
-static enum ice_status
-ice_remove_vlan_internal(struct ice_hw *hw,
- struct ice_fltr_list_entry *f_entry)
+enum ice_status
+ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_fltr_info *new_fltr;
- struct ice_fltr_mgmt_list_entry *v_list_elem;
- u16 vsi_id;
+ struct ice_fltr_list_entry *list_itr, *tmp;
- new_fltr = &f_entry->fltr_info;
-
- v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
- if (!v_list_elem)
+ if (!m_list)
return ICE_ERR_PARAM;
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
+ list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
+ enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+ list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_MAC,
+ list_itr);
+ if (list_itr->status)
+ return list_itr->status;
+ }
+ return 0;
}
/**
@@ -1734,131 +2037,169 @@ ice_remove_vlan_internal(struct ice_hw *hw,
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
- struct ice_fltr_list_entry *v_list_itr;
- enum ice_status status = 0;
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
return ICE_ERR_PARAM;
- list_for_each_entry(v_list_itr, v_list, list_entry) {
- status = ice_remove_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+ v_list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_VLAN,
+ v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
- return status;
+ return 0;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @vsi_list_head: pointer to the list to add entry to
+ * @fi: pointer to fltr_info of filter entry to copy & add
+ *
+ * Helper function, used when creating a list of filters to remove from
+ * a specific VSI. The entry added to vsi_list_head is a COPY of the
+ * original filter entry, with the exception of fltr_info.fltr_act and
+ * fltr_info.fwd_id fields. These are set such that later logic can
+ * extract which VSI to remove the fltr from, and pass on that information.
+ */
+static enum ice_status
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
+ struct list_head *vsi_list_head,
+ struct ice_fltr_info *fi)
+{
+ struct ice_fltr_list_entry *tmp;
+
+ /* this memory is freed up in the caller function
+ * once filters for this VSI are removed
+ */
+ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp->fltr_info = *fi;
+
+ /* Overwrite these fields to indicate which VSI to remove filter from,
+ * so find and remove logic can extract the information from the
+ * list entries. Note that original entries will still have proper
+ * values.
+ */
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.vsi_handle = vsi_handle;
+ tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ list_add(&tmp->list_entry, vsi_list_head);
+
+ return 0;
}
/**
* ice_add_to_vsi_fltr_list - Add VSI filters to the list
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup_list_head: pointer to the list that has certain lookup type filters
- * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
+ *
+ * Locates all filters in lkup_list_head that are used by the given VSI,
+ * and adds COPIES of those entries to vsi_list_head (intended to be used
+ * to remove the listed filters).
+ * Note that this means all entries in vsi_list_head must be explicitly
+ * deallocated by the caller when done with the list.
*/
static enum ice_status
-ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
/* check to make sure VSI id is valid and within boundary */
- if (vsi_id >=
- (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
+ if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
- if ((fi->fltr_act == ICE_FWD_TO_VSI &&
- fi->fwd_id.vsi_id == vsi_id) ||
- (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
- (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
- struct ice_fltr_list_entry *tmp;
-
- /* this memory is freed up in the caller function
- * ice_remove_vsi_lkup_fltr() once filters for
- * this VSI are removed
- */
- tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ continue;
- memcpy(&tmp->fltr_info, fi, sizeof(*fi));
-
- /* Expected below fields to be set to ICE_FWD_TO_VSI and
- * the particular VSI id since we are only removing this
- * one VSI
- */
- if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi_id;
- }
-
- list_add(&tmp->list_entry, vsi_list_head);
- }
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ vsi_list_head, fi);
+ if (status)
+ return status;
}
- return 0;
+ return status;
}
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup: switch rule filter lookup type
*/
static void
-ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
enum ice_sw_lkup_type lkup)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry;
struct list_head remove_list_head;
+ struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status;
INIT_LIST_HEAD(&remove_list_head);
+ rule_lock = &sw->recp_list[lkup].filt_rule_lock;
+ rule_head = &sw->recp_list[lkup].filt_rules;
+ mutex_lock(rule_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
+ &remove_list_head);
+ mutex_unlock(rule_lock);
+ if (status)
+ return;
+
switch (lkup) {
case ICE_SW_LKUP_MAC:
- mutex_lock(&sw->mac_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->mac_list_head,
- &remove_list_head);
- mutex_unlock(&sw->mac_list_lock);
- if (!status) {
- ice_remove_mac(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_VLAN:
- mutex_lock(&sw->vlan_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->vlan_list_head,
- &remove_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- if (!status) {
- ice_remove_vlan(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_vlan(hw, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
case ICE_SW_LKUP_PROMISC:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_DFLT:
- ice_debug(hw, ICE_DBG_SW,
- "Remove filters for this lookup type hasn't been implemented yet\n");
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ case ICE_SW_LKUP_LAST:
+ default:
+ ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
break;
}
- return;
-free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
@@ -1868,16 +2209,121 @@ free_fltr_list:
/**
* ice_remove_vsi_fltr - Remove all filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
+ */
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
+{
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
+}
+
+/**
+ * ice_replay_vsi_fltr - Replay filters for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @recp_id: Recipe id for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
+ * A valid VSI handle is required.
+ */
+static enum ice_status
+ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ struct list_head *list_head)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+ enum ice_status status = 0;
+ u16 hw_vsi_id;
+
+ if (list_empty(list_head))
+ return status;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ list_for_each_entry(itr, list_head, list_entry) {
+ struct ice_fltr_list_entry f_entry;
+
+ f_entry.fltr_info = itr->fltr_info;
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
+ itr->fltr_info.vsi_handle == vsi_handle) {
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
+ continue;
+ }
+ if (!itr->vsi_list_info ||
+ !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+ continue;
+ /* Clearing it so that the logic can add it back */
+ clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/**
+ * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ *
+ * Replays filters for requested VSI via vsi_handle.
*/
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct list_head *head;
+
+ head = &sw->recp_list[i].filt_replay_rules;
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the hw struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ if (!sw)
+ return;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
+ struct list_head *l_head;
+
+ l_head = &sw->recp_list[i].filt_replay_rules;
+ ice_rem_sw_rule_info(hw, l_head);
+ }
+ }
}
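
A rough sketch of how the replay/cleanup pair above is intended to be used during a post-reset rebuild (the loop and helper name are illustrative, not taken from this patch): replay the bookkept filters for every valid VSI handle, then drop the replay bookkeeping once the rebuild is done.

static enum ice_status ice_example_replay_all(struct ice_hw *hw)
{
	enum ice_status status = 0;
	u16 vsi_handle;

	for (vsi_handle = 0; vsi_handle < ICE_MAX_VSI; vsi_handle++) {
		if (!ice_is_vsi_valid(hw, vsi_handle))
			continue;

		/* re-program the switch rules recorded before the reset */
		status = ice_replay_vsi_all_fltr(hw, vsi_handle);
		if (status)
			break;
	}

	/* the replay bookkeeping is only needed once per reset */
	ice_rm_all_sw_replay_rule_info(hw);
	return status;
}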
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 9b8ec128ee31..b88d96a1ef69 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,9 @@ struct ice_vsi_ctx {
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
+ struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
+ u8 vf_num;
};
enum ice_sw_fwd_act_type {
@@ -39,6 +41,15 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_DFLT = 5,
ICE_SW_LKUP_ETHERTYPE_MAC = 8,
ICE_SW_LKUP_PROMISC_VLAN = 9,
+ ICE_SW_LKUP_LAST
+};
+
+/* type of filter src id */
+enum ice_src_id {
+ ICE_SRC_ID_UNKNOWN = 0,
+ ICE_SRC_ID_VSI,
+ ICE_SRC_ID_QUEUE,
+ ICE_SRC_ID_LPORT,
};
struct ice_fltr_info {
@@ -55,6 +66,7 @@ struct ice_fltr_info {
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
+ enum ice_src_id src_id;
union {
struct {
@@ -76,7 +88,10 @@ struct ice_fltr_info {
u16 ethertype;
u8 mac_addr[ETH_ALEN]; /* optional */
} ethertype_mac;
- } l_data;
+ } l_data; /* Make sure to zero out the memory of l_data before using
+ * it, or only set the data associated with the lookup match;
+ * everything else should be zero.
+ */
/* Depending on filter action */
union {
@@ -84,12 +99,16 @@ struct ice_fltr_info {
* queue id in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
- u16 vsi_id:10;
+ u16 hw_vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
+ /* Sw VSI handle */
+ u16 vsi_handle;
+
/* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
- * determines the range of queues the packet needs to be forwarded to
+ * determines the range of queues the packet needs to be forwarded to.
+ * Note that qgrp_size must be set to a power of 2.
*/
u8 qgrp_size;
@@ -98,29 +117,52 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_sw_recipe {
+ struct list_head l_entry;
+
+ /* To protect modification of filt_rule list
+ * defined below
+ */
+ struct mutex filt_rule_lock;
+
+ /* List of type ice_fltr_mgmt_list_entry */
+ struct list_head filt_rules;
+ struct list_head filt_replay_rules;
+
+ /* linked list of type recipe_list_entry */
+ struct list_head rg_list;
+ /* linked list of type ice_sw_fv_list_entry*/
+ struct list_head fv_list;
+ struct ice_aqc_recipe_data_elem *r_buf;
+ u8 recp_count;
+ u8 root_rid;
+ u8 num_profs;
+ u8 *prof_ids;
+
+ /* recipe bitmap: what all recipes makes this recipe */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+};
+
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
u16 vsi_list_id;
-};
-
-enum ice_sw_fltr_status {
- ICE_FLTR_STATUS_NEW = 0,
- ICE_FLTR_STATUS_FW_SUCCESS,
- ICE_FLTR_STATUS_FW_FAIL,
+ /* counter to track how many rules are reusing this VSI list */
+ u16 ref_cnt;
};
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_sw_fltr_status status;
+ enum ice_status status;
struct ice_fltr_info fltr_info;
};
/* This defines an entry in the list that maintains MAC or VLAN membership
* to HW list mapping, since multiple VSIs can subscribe to the same MAC or
* VLAN. As an optimization the VSI list should be created only when a
- * second VSI becomes a subscriber to the VLAN address.
+ * second VSI becomes a subscriber to the same MAC address. VSI lists are always
+ * used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
/* back pointer to VSI list id to VSI list mapping */
@@ -138,24 +180,33 @@ struct ice_fltr_mgmt_list_entry {
/* VSI related commands */
enum ice_status
-ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
enum ice_status
-ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd);
-
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
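
The per-recipe filt_rule_lock in the new struct ice_sw_recipe replaces the per-type mutexes removed from struct ice_switch_info. A minimal sketch of the locking pattern (a hypothetical walker, modeled on ice_remove_vsi_lkup_fltr() in this patch) for inspecting one recipe's rule list:

static void ice_example_walk_mac_rules(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protects the filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry)
		/* read-only inspection under the rule lock */
		ice_debug(hw, ICE_DBG_SW, "rule id %d\n",
			  itr->fltr_info.fltr_rule_id);
	mutex_unlock(rule_lock);
}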
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6481e3d86374..5dae968d853e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt = -1;
return 0;
err:
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 31bc998fe200..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -71,6 +71,7 @@ struct ice_txq_stats {
u64 restart_q;
u64 tx_busy;
u64 tx_linearize;
+ int prev_pkt; /* negative if no pending Tx descriptors */
};
struct ice_rxq_stats {
@@ -103,10 +104,17 @@ enum ice_rx_dtype {
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 0x003E
+#define ICE_ITR_8K 125
+#define ICE_ITR_20K 50
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
+/* Apply the ITR granularity translation to program the register. itr_gran is
+ * either 2 or 4 usecs, so divide itr_gran by 2 to get the number of bits the
+ * ITR value must be shifted right by.
+ */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
+ ((itr_gran) / 2))
-/* apply ITR HW granularity translation to program the HW registers */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+#define ICE_DFLT_INTRL 0
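
A quick sanity check of the new ITR_TO_REG() translation (the helper below is illustrative only): the usec setting is shifted right by itr_gran / 2 bits, which divides it by the granularity.

static u16 ice_example_itr_reg(u8 itr_gran)
{
	/* illustrative only: ICE_DFLT_TX_ITR is 50 usecs, so with a
	 * 2 usec granularity this returns 50 >> 1 == 25, and with a
	 * 4 usec granularity it returns 50 >> 2 == 12
	 */
	return ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
}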
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -128,14 +136,6 @@ struct ice_ring {
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware supports 2us/1us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
-
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -172,6 +172,7 @@ struct ice_ring_container {
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
+ int itr_idx; /* index in the interrupt vector */
u16 itr;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 97c366e0ca59..12f9432abf11 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
return test_bit(tc, (unsigned long *)&bitmap);
}
+/* Driver always calls main vsi_handle first */
+#define ICE_MAIN_VSI_HANDLE 0
+
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
@@ -34,10 +37,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
- ICE_GLOBAL_CFG_LOCK_RES_ID,
- ICE_CHANGE_LOCK_RES_ID
+ ICE_CHANGE_LOCK_RES_ID,
+ ICE_GLOBAL_CFG_LOCK_RES_ID
};
+/* FW update timeout definitions are in milliseconds */
+#define ICE_NVM_TIMEOUT 180000
+#define ICE_CHANGE_LOCK_TIMEOUT 1000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
@@ -76,6 +84,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
+ ICE_VSI_VF,
};
struct ice_link_status {
@@ -95,6 +104,15 @@ struct ice_link_status {
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the TX scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+ ICE_NO_RESET = 0,
+ ICE_VM_RESET,
+ ICE_VF_RESET,
+};
+
/* PHY info such as phy_type, etc... */
struct ice_phy_info {
struct ice_link_status link_info;
@@ -119,6 +137,9 @@ struct ice_hw_common_caps {
/* Max MTU for function or device */
u16 max_mtu;
+ /* Virtualization support */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -127,12 +148,15 @@ struct ice_hw_common_caps {
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
u32 guaranteed_num_vsi;
};
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
};
@@ -142,11 +166,18 @@ struct ice_mac_info {
u8 perm_addr[ETH_ALEN];
};
-/* Various RESET request, These are not tied with HW reset types */
+/* Reset types used to determine which kind of reset was requested. These
+ * defines match the RESET_TYPE field of the GLGEN_RSTAT register.
+ * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT register
+ * because its reset source is different from the other types listed.
+ */
enum ice_reset_req {
- ICE_RESET_PFR = 0,
+ ICE_RESET_POR = 0,
+ ICE_RESET_INVAL = 0,
ICE_RESET_CORER = 1,
ICE_RESET_GLOBR = 2,
+ ICE_RESET_EMPR = 3,
+ ICE_RESET_PFR = 4,
};
/* Bus parameters */
@@ -180,7 +211,7 @@ struct ice_sched_node {
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
- u16 vsi_id;
+ u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
@@ -204,6 +235,7 @@ enum ice_agg_type {
};
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_DFLT_BW_WT 1
/* vsi type list entry to locate corresponding vsi/ag nodes */
struct ice_sched_vsi_info {
@@ -238,8 +270,6 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- struct ice_sched_tx_policy sched_policy;
- struct list_head vsi_info_list;
struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
@@ -247,19 +277,26 @@ struct ice_port_info {
};
struct ice_switch_info {
- /* Switch VSI lists to MAC/VLAN translation */
- struct mutex mac_list_lock; /* protect MAC list */
- struct list_head mac_list_head;
- struct mutex vlan_list_lock; /* protect VLAN list */
- struct list_head vlan_list_head;
- struct mutex eth_m_list_lock; /* protect ethtype list */
- struct list_head eth_m_list_head;
- struct mutex promisc_list_lock; /* protect promisc mode list */
- struct list_head promisc_list_head;
- struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */
- struct list_head mac_vlan_list_head;
-
struct list_head vsi_list_map_head;
+ struct ice_sw_recipe *recp_list;
+};
+
+/* FW logging configuration */
+struct ice_fw_log_evnt {
+ u8 cfg : 4; /* New event enables to configure */
+ u8 cur : 4; /* Current/active event enables */
+};
+
+struct ice_fw_log_cfg {
+ u8 cq_en : 1; /* FW logging is enabled via the control queue */
+ u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
+ u8 actv_evnts; /* Cumulation of currently enabled log events */
+
+#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
+ struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
};
/* Port hardware description */
@@ -286,8 +323,11 @@ struct ice_hw {
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
+ u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
+ u8 reset_ongoing; /* true if hw is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -297,6 +337,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
@@ -308,16 +349,27 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- /* minimum allowed value for different speeds */
-#define ICE_ITR_GRAN_MIN_200 1
-#define ICE_ITR_GRAN_MIN_100 1
-#define ICE_ITR_GRAN_MIN_50 2
-#define ICE_ITR_GRAN_MIN_25 4
+ struct ice_fw_log_cfg fw_log;
+
+/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
+ * register. Used for determining the itr/intrl granularity during
+ * initialization.
+ */
+#define ICE_MAX_AGG_BW_200G 0x0
+#define ICE_MAX_AGG_BW_100G 0X1
+#define ICE_MAX_AGG_BW_50G 0x2
+#define ICE_MAX_AGG_BW_25G 0x3
+ /* ITR granularity for different speeds */
+#define ICE_ITR_GRAN_ABOVE_25 2
+#define ICE_ITR_GRAN_MAX_25 4
/* ITR granularity in 1 us */
- u8 itr_gran_200;
- u8 itr_gran_100;
- u8 itr_gran_50;
- u8 itr_gran_25;
+ u8 itr_gran;
+ /* INTRL granularity for different speeds */
+#define ICE_INTRL_GRAN_ABOVE_25 4
+#define ICE_INTRL_GRAN_MAX_25 8
+ /* INTRL granularity in 1 us */
+ u8 intrl_gran;
+
u8 ucast_shared; /* true if VSIs can share unicast addr */
};
@@ -391,4 +443,7 @@ struct ice_hw_port_stats {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..45f10f8f01dc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,2675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ /* Not all VFs are enabled so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
+/**
+ * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ bool link_up)
+{
+ u16 link_speed;
+
+ if (link_up)
+ link_speed = ICE_AQ_LINK_SPEED_40GB;
+ else
+ link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, pfe, link_speed, link_up);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ ls = &hw->port_info->phy.link_info;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
+ ICE_AQ_LINK_UP);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_get_vf_vector - get VF interrupt vector register offset
+ * @vf_msix: number of MSIx vector per VF on a PF
+ * @vf_id: VF identifier
+ * @i: index of MSIx vector
+ */
+static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
+{
+ return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
+ VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
+}
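
A worked example of the offset math above (numbers are illustrative, not from this patch): vector 0 maps to the VF's own VFINT_DYN_CTLN entry, while the remaining vectors are packed by (vf_msix - 1) * vf_id + (i - 1).

static void ice_example_vf_vectors(struct ice_hw *hw)
{
	/* illustrative only: with 5 MSI-X vectors per VF (vf_msix == 5)
	 * and vf_id == 2, the offsets resolve as:
	 *   i == 0 -> VFINT_DYN_CTLN(2)                 (the VF's own entry)
	 *   i == 1 -> VFINT_DYN_CTLN((5 - 1) * 2 + 0) == VFINT_DYN_CTLN(8)
	 *   i == 4 -> VFINT_DYN_CTLN((5 - 1) * 2 + 3) == VFINT_DYN_CTLN(11)
	 */
	wr32(hw, ice_get_vf_vector(5, 2, 1), VFINT_DYN_CTLN_CLEARPBA_M);
}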
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, pf_vf_msix;
+
+ /* First, disable the VF's configuration API to prevent the OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx) {
+ ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
+ vf->lan_vsi_num = 0;
+ vf->num_mac = 0;
+ }
+
+ pf_vf_msix = pf->num_vf_msix;
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = 0; i < pf_vf_msix; i++) {
+ u32 reg_idx;
+
+ reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
+ wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/***********************enable_vf routines*****************************/
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->num_vf_msix - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int tmp, i;
+
+ if (!pf->vf)
+ return;
+
+ while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Avoid wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
+ continue;
+
+ /* stop rings without wait time */
+ ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+ }
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ tmp = pf->num_alloc_vfs;
+ pf->num_vf_qps = 0;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ /* disable VF qp mappings */
+ ice_dis_vf_mappings(&pf->vf[i]);
+
+ /* Set this state so that assigned VF vectors can be
+ * reclaimed by PF for reuse in ice_vsi_release(). No
+ * need to clear this bit since the pf->vf array is being
+ * freed anyway after this for loop.
+ */
+ set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
+ ice_free_vf_res(&pf->vf[i]);
+ }
+ }
+
+ devm_kfree(&pf->pdev->dev, pf->vf);
+ pf->vf = NULL;
+
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!pci_vfs_assigned(pf->pdev)) {
+ int vf_id;
+
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+ }
+ clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ struct ice_hw *hw;
+ int vf_abs_id, i;
+
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in ice_alloc_vf_res(), when it's safe again to access the VF's VSI.
+ * It's normally disabled in ice_free_vf_res(), but it's safer to do
+ * it earlier so that any VF config functions still running at this
+ * point have time to finish.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!is_vflr) {
+ /* reset VF using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_M) != 0)
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(1);
+ }
+}
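
For reference, the GLGEN_VFLRSTAT indexing used here and in ice_free_vfs() splits the absolute VF id into a 32-bit register index and a bit index. A small illustrative helper (the numbers in the comment are assumptions, not from this patch):

static void ice_example_ack_vflr(struct ice_hw *hw, u16 vf_abs_id)
{
	/* illustrative only: with vf_base_id == 64 and vf_id == 5,
	 * vf_abs_id == 69, so the VFLR bit lives in GLGEN_VFLRSTAT(2), bit 5
	 */
	u32 reg_idx = vf_abs_id / 32;	/* which 32-bit VFLRSTAT register */
	u32 bit_idx = vf_abs_id % 32;	/* which bit within that register */

	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}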
+
+/**
+ * ice_vsi_set_pvid - Set port VLAN id for the VSI
+ * @vsi: the VSI being changed
+ * @vid: the VLAN id to set as a PVID
+ */
+static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ ctxt.info.pvid = cpu_to_le16(vid);
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.pvid = ctxt.info.pvid;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_vsi_manage_vlan_stripping(vsi, false)) {
+ dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
+ vsi->vsi_num);
+ return -ENODEV;
+ }
+
+ vsi->info.pvid = 0;
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vf_id: defines VF id to which this VSI connects.
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+}
+
+/**
+ * ice_alloc_vsi_res - Setup VF VSI and its resources
+ * @vf: pointer to the VF structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_alloc_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status = 0;
+
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ return -ENOMEM;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+	/* first vector index is the VF's OICR index */
+	vf->first_vector_idx = vsi->hw_base_vector;
+	/* Since hw_base_vector holds the vector where data queue interrupts
+	 * start, increment by 1 because the VF's allocated vectors include
+	 * the OICR interrupt as well.
+	 */
+ vsi->hw_base_vector += 1;
+
+	/* Check if a port VLAN existed before, and restore it accordingly */
+ if (vf->port_vlan_id)
+ ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+
+ eth_broadcast_addr(broadcast);
+
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+
+ if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vf->dflt_lan_addr.addr);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+ }
+
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ dev_err(&pf->pdev->dev, "could not add mac filters\n");
+
+ /* Clear this bit after VF initialization since we shouldn't reclaim
+ * and reassign interrupts for synchronous or asynchronous VFR events.
+ * We don't want to reconfigure interrupts since AVF driver doesn't
+ * expect vector assignment to be changed unless there is a request for
+ * more vectors.
+ */
+ clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
+ice_alloc_vsi_res_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_alloc_vf_res - Allocate VF resources
+ * @vf: pointer to the VF structure
+ */
+static int ice_alloc_vf_res(struct ice_vf *vf)
+{
+ int status;
+
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* VF is now completely initialized */
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ return status;
+
+ice_alloc_vf_res_exit:
+ ice_free_vf_res(vf);
+ return status;
+}
+
+/**
+ * ice_ena_vf_mappings
+ * @vf: pointer to the VF structure
+ *
+ * Enable VF vectors and queues allocation by writing the details into
+ * respective registers.
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+ int abs_vf_id;
+ u32 reg;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ first = vf->first_vector_idx;
+ last = (first + pf->num_vf_msix) - 1;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
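+	/* VPINT_ALLOC and VPINT_ALLOC_PCI describe the contiguous MSI-X
+	 * vector range [first, last] owned by this VF; GLINT_VECT2FUNC then
+	 * maps each of those vectors back to this VF and PF.
+	 */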
+ /* VF Vector allocation */
+ reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
+ ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+	/* map the interrupts to their functions */
+ for (v = first; v <= last; v++) {
+ reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available, or
+ * returns zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+ bool checked_min_res = false;
+ int res;
+
+	/* Start by checking if the PF can assign the maximum number of
+	 * resources to all num_alloc_vfs.
+	 * If yes, return that number per VF.
+	 * If no, halve the request (rounding up) and check again.
+	 * Repeat until even the minimum resources are not available; in that
+	 * case return 0.
+ */
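+	/* For example, with 8 VFs, max_res = 65 and 200 units available
+	 * (and min_res no larger than 17), the loop tries 65 (520 needed),
+	 * then 33 (264), then 17 (136), which fits, so 17 is returned per VF.
+	 */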
+ res = max_res;
+ while ((res >= min_res) && !checked_min_res) {
+ int num_all_res;
+
+ num_all_res = pf->num_alloc_vfs * res;
+ if (num_all_res <= avail_res)
+ return res;
+
+ if (res == min_res)
+ checked_min_res = true;
+
+ res = DIV_ROUND_UP(res, 2);
+ }
+ return 0;
+}
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where we calculate the actual number of resources for
+ * VF VSIs; we don't reserve them ahead of time during probe. Returns success
+ * if vector and queue resources are available, otherwise returns an error code
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+ u16 num_msix, num_txq, num_rxq;
+
+ if (!pf->num_alloc_vfs)
+ return -EINVAL;
+
+	/* Grab from the HW interrupt common pool.
+	 * Note: by the time the user decides a VF needs more vectors, it is
+	 * already too late, since this must be decided before the VF
+	 * interface is created. So the best we can do is take a guess at
+	 * what the user might want.
+	 *
+	 * We have two policies for vector allocation:
+	 * 1. If num_alloc_vfs is from 1 to 16, treat this as a small number
+	 * of VFs used for NFV appliances. Since this is a special case, try
+	 * to assign the maximum number of vectors per VF (65) as far as
+	 * possible, based on the ice_determine_res algorithm.
+	 * 2. If num_alloc_vfs is from 17 to 256, this is a large number of
+	 * regular VFs not used for any special purpose, so try to grab the
+	 * default number of interrupt vectors (5, as supported by the AVF
+	 * driver).
+	 */
+ if (pf->num_alloc_vfs <= 16) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_MAX_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_DFLT_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Number of VFs %d exceeds max VF count %d\n",
+ pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ return -EIO;
+ }
+
+ if (!num_msix)
+ return -EIO;
+
+	/* Grab from the common queue pool.
+	 * Start by requesting the default number of queues (4, as supported
+	 * by the AVF driver). Note that the main difference between queues
+	 * and vectors is that the latter can only be reserved at init time,
+	 * while queues can be requested by the VF at runtime through
+	 * virtchnl; that is why we start by reserving only a few queues.
+	 */
+ num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq)
+ return -EIO;
+
+	/* The AVF driver works only with queue pairs, which means it expects
+	 * an equal number of Rx and Tx queues, so take the minimum of the
+	 * available Tx and Rx queues.
+	 */
+ pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+ pf->num_vf_msix = num_msix;
+
+ return 0;
+}
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset is finished properly, and ensure the
+ * minimum amount of wait time has passed. Reallocate VF resources back to make
+ * VF state active
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+
+ /* PF software completes the flow by notifying VF that reset flow is
+ * completed. This is done by enabling hardware by clearing the reset
+ * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
+ * register to VFR completed (done at the end of this function)
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in ice_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into Guest OS kernel log, so best avoid
+ * it.
+ */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!ice_alloc_vf_res(vf)) {
+ ice_ena_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise it must perform
+ * these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ */
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v, i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return false;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+ /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
+ * queues to inform Firmware about VF reset.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+ ICE_VF_RESET, v, NULL);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
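+	/* Note: at most 10 iterations of a 10-20 ms sleep, so the total wait
+	 * across all VFs is bounded to roughly 100-200 ms.
+	 */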
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence */
+ while (v < pf->num_alloc_vfs) {
+ struct ice_vf *vf = &pf->vf[v];
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
+ }
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_free_vf_res(&pf->vf[v]);
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+			"Cannot allocate VF resources, try with fewer VFs\n");
+ return false;
+ }
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_cleanup_and_realloc_vf(&pf->vf[v]);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Returns true if the VF is reset, false otherwise.
+ */
+static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If the VFs have been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue.
+ */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ ice_trigger_vf_reset(vf, is_vflr);
+
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
+ vf->vf_id);
+ ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ } else {
+		/* Call the Disable LAN Tx queue AQ call even when queues are
+		 * not enabled. This is needed for successful completion of VFR.
+		 */
+ ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+ NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M) {
+ rsd = true;
+ break;
+ }
+ }
+
+	/* Display a warning if the VF didn't manage to reset in time, but
+	 * continue on with the operation.
+	 */
+ if (!rsd)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_free_vf_res(vf);
+
+ ice_cleanup_and_realloc_vf(vf);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!pf->num_alloc_vfs)
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VFs resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vfs;
+ int i, ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+
+ ice_flush(hw);
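+	/* This interrupt is re-armed via ice_irq_dynamic_ena() at the end of
+	 * this function; note that the success path below also jumps to
+	 * err_unroll_intr for exactly that purpose.
+	 */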
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ pf->num_alloc_vfs = 0;
+ goto err_unroll_intr;
+ }
+ /* allocate memory */
+ vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_unroll_sriov;
+ }
+ pf->vf = vfs;
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].vf_sw_id = pf->first_sw;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
+
+ /* Set this state so that PF driver does VF vector assignment */
+ set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
+ }
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+ if (!ice_reset_all_vfs(pf, false))
+ goto err_unroll_sriov;
+
+ goto err_unroll_intr;
+
+err_unroll_sriov:
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ return ret;
+}
+
+/**
+ * ice_pf_state_is_nominal - checks the pf for nominal state
+ * @pf: pointer to pf to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state.
+ * Returns false otherwise
+ */
+static bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -ENODEV;
+ }
+
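+	/* If VFs already exist with a different count, tear them down first;
+	 * if the count already matches, there is nothing more to do.
+	 */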
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return num_vfs;
+
+ if (num_vfs > pf->num_vfs_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->num_vfs_supported);
+ return -ENOTSUPP;
+ }
+
+ dev_info(dev, "Allocating %d VFs\n", num_vfs);
+ err = ice_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
+
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return num_vfs;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * This function is called when the user updates the number of VFs in sysfs.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return ice_pci_sriov_ena(pf, num_vfs);
+
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int vf_id;
+ u32 reg;
+
+ if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !pf->num_alloc_vfs)
+ return;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+
+ clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, true);
+ }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+ ice_vc_notify_vf_reset(vf);
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ enum ice_status aq_ret;
+ struct ice_pf *pf;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_inval_msgs++;
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
+ if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ return -EIO;
+ }
+ } else {
+ vf->num_valid_msgs++;
+ /* reset the invalid counter, if a valid message is received. */
+ vf->num_inval_msgs = 0;
+ }
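+	/* A VF that accumulates more than ICE_DFLT_NUM_INVAL_MSGS_ALLOWED
+	 * consecutive failed requests is disabled above; any valid message
+	 * resets the counter.
+	 */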
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
+ (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_resource *vfres = NULL;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = ICE_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
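+	/* Advertise L2 unconditionally, VLAN offload only when no port VLAN
+	 * is set, and echo the remaining capabilities back only if the VF
+	 * requested them.
+	 */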
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi->info.pvid)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue are equal for VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = pf->num_vf_msix;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->dflt_lan_addr.addr);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ (u8 *)vfres, len);
+
+ devm_kfree(&pf->pdev->dev, vfres);
+ return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself; unlike other virtchnl messages,
+ * the PF driver doesn't send a response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_find_vsi_from_id
+ * @pf: the pf structure to search for the VSI
+ * @id: id of the VSI it is searching for
+ *
+ * searches for the VSI with the given id
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ */
+static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @qid: VSI relative queue id
+ *
+ * check for the valid queue id
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+
+	/* allocated Tx and Rx queues should always be equal for a VF VSI */
+	return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, vrk->key, NULL, 0);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_eth_stats stats;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ memset(&stats, 0, sizeof(struct ice_eth_stats));
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ if (ice_vsi_start_rx_rings(vsi))
+ aq_ret = ICE_ERR_PARAM;
+
+ /* Set flag to indicate that queues are enabled */
+ if (!aq_ret)
+ set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop tx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ if (ice_vsi_stop_rx_rings(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop rx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ /* Clear enabled queues flag */
+ if (!aq_ret)
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ u16 vsi_id, vsi_q_id, vector_id;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi = NULL;
+ struct ice_pf *pf = vf->pf;
+ enum ice_status aq_ret = 0;
+ unsigned long qmap;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
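+	/* Each vector map entry carries Rx and Tx queue bitmaps for one
+	 * vector; walk the bitmaps to attach the rings to the matching
+	 * q_vector and record the ITR indices, then program the MSI-X
+	 * mappings via ice_vsi_cfg_msix() below.
+	 */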
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!(vector_id < pf->hw.func_caps.common_cap
+ .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+		/* look out for an invalid queue index */
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+ }
+
+ if (vsi)
+ ice_vsi_cfg_msix(vsi);
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
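+	/* Validate each queue pair and copy its ring length, DMA address,
+	 * Rx buffer size and max frame size from the VF into the PF-owned
+	 * ring structures before the queues are programmed.
+	 */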
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ /* copy Tx queue info from VF into VSI */
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ /* copy Rx queue info from VF into vsi */
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ }
+
+	/* The VF can request to configure fewer queues than it was allocated,
+	 * or the default allocation, so update the VSI with the new number.
+	 */
+ vsi->num_txq = qci->num_queue_pairs;
+ vsi->num_rxq = qci->num_queue_pairs;
+
+ if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ aq_ret = 0;
+ else
+ aq_ret = ICE_ERR_PARAM;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+static bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if MAC filters are being set, false otherwise
+ *
+ * add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ enum ice_status ret;
+ LIST_HEAD(mac_list);
+ struct ice_vsi *vsi;
+ int mac_count = 0;
+ int i;
+
+ if (set)
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ else
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+			"Can't add more MAC addresses because the VF is not trusted; switch the VF to trusted mode in order to add more MAC addresses\n");
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *maddr = al->list[i].addr;
+
+ if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
+ is_broadcast_ether_addr(maddr)) {
+ if (set) {
+ /* VF is trying to add filters that the PF
+ * already added. Just continue.
+ */
+ dev_info(&pf->pdev->dev,
+ "mac %pM already set for VF %d\n",
+ maddr, vf->vf_id);
+ continue;
+ } else {
+ /* VF can't remove dflt_lan_addr/bcast mac */
+ dev_err(&pf->pdev->dev,
+ "can't remove mac %pM for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+ }
+
+ /* check for the invalid cases and bail if necessary */
+ if (is_zero_ether_addr(maddr)) {
+ dev_err(&pf->pdev->dev,
+ "invalid mac %pM provided for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (is_unicast_ether_addr(maddr) &&
+ !ice_can_vf_change_mac(vf)) {
+ dev_err(&pf->pdev->dev,
+ "can't change unicast mac for untrusted VF %d\n",
+ vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* get here if maddr is multicast or if VF can change mac */
+ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto handle_mac_exit;
+ }
+ mac_count++;
+ }
+
+ /* program the updated filter list */
+ if (set)
+ ret = ice_add_mac(&pf->hw, &mac_list);
+ else
+ ret = ice_remove_mac(&pf->hw, &mac_list);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "can't update mac filters for VF %d, error %d\n",
+ vf->vf_id, ret);
+ } else {
+ if (set)
+ vf->num_mac += mac_count;
+ else
+ vf->num_mac -= mac_count;
+ }
+
+handle_mac_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &mac_list);
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will send a virtchnl message response
+ * informing the VF of the number of available queue pairs.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ int req_queues = vfres->num_queue_pairs;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
+ int cur_queues;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = pf->num_vf_qps;
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ if (req_queues <= 0) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request %d queues. Ignoring.\n",
+ vf->vf_id, req_queues);
+ } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev,
+ "VF %d granted request of %d queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ aq_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN id being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * program VF Port VLAN id and/or qos
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
+{
+ u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (vlan_id > ICE_MAX_VLANID || qos > 7) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ /* duplicate request, so just return success */
+ dev_info(&pf->pdev->dev,
+ "Duplicate pvid %d request\n", vlanprio);
+ return ret;
+ }
+
+ /* If pvid, then remove all filters on the old VLAN */
+ if (vsi->info.pvid)
+ ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+
+ if (vlan_id || qos) {
+ ret = ice_vsi_set_pvid(vsi, vlanprio);
+ if (ret)
+ goto error_set_pvid;
+ } else {
+ ice_vsi_kill_pvid(vsi);
+ }
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter for each MAC */
+ ret = ice_vsi_add_vlan(vsi, vlan_id);
+ if (ret)
+ goto error_set_pvid;
+ }
+
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+error_set_pvid:
+ return ret;
+}
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN id
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+			 "VF is not trusted; switch the VF to trusted mode in order to add more VLANs\n");
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ aq_ret = ICE_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->info.pvid) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
+ dev_err(&pf->pdev->dev,
+ "%sable VLAN stripping failed for VSI %i\n",
+ add_v ? "en" : "dis", vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ if (!ice_vsi_add_vlan(vsi, vid)) {
+ vf->num_vlan++;
+ set_bit(vid, vsi->active_vlans);
+
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid))
+ if (ice_cfg_vlan_pruning(vsi, true))
+ aq_ret = ICE_ERR_PARAM;
+ } else {
+ aq_ret = ICE_ERR_PARAM;
+ }
+ }
+ } else {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ /* Make sure ice_vsi_kill_vlan is successful before
+ * updating VLAN information
+ */
+ if (!ice_vsi_kill_vlan(vsi, vid)) {
+ vf->num_vlan--;
+ clear_bit(vid, vsi->active_vlans);
+
+ /* Disable VLAN pruning when removing VLAN 0 */
+ if (unlikely(!vid))
+ ice_cfg_vlan_pruning(vsi, false);
+ }
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN id
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN id
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, true))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, false))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * called from the common asq/arq handler to
+ * process request from VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ int err = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ /* Perform additional checks specific to RSS and Virtchnl */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
+ err = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ice_vc_get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ice_vc_get_vf_res_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ice_vc_reset_vf_msg(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ice_vc_add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ice_vc_del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ice_vc_cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ice_vc_ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ice_vc_dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ice_vc_request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ice_vc_cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ice_vc_config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ice_vc_config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ice_vc_get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ice_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ice_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ice_vc_ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ice_vc_dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* Helper function cares less about error return values here
+ * as it is busy with pending work.
+ */
+ dev_info(&pf->pdev->dev,
+			"PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+ ICE_VLAN_PRIORITY_S;
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int status;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
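+	/* ctx was zero-initialized above, so when disabling, the security
+	 * flags stay cleared and the VSI update turns anti-spoof off
+	 */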
+ if (ena) {
+ ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+ }
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+ if (status) {
+ dev_dbg(&pf->pdev->dev,
+			"Error %d, failed to update VSI parameters\n", status);
+ return -EIO;
+ }
+
+ vf->spoofchk = ena;
+ vsi->info.sec_flags = ctx.info.sec_flags;
+ vsi->info.sw_flags2 = ctx.info.sw_flags2;
+
+ return status;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * Program the VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ * flow will use the updated dflt_lan_addr and add a MAC filter
+ * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
+ * set the MAC address for this VF.
+ */
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ vf->pf_set_mac = true;
+ netdev_info(netdev,
+		    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+ vf_id, mac);
+
+ ice_vc_dis_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ /* Check if already trusted */
+ if (trusted == vf->trusted)
+ return 0;
+
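+	/* disable the VF so its driver renegotiates resources with the new
+	 * trust setting when it reinitializes
+	 */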
+ vf->trusted = trusted;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
+
+ return 0;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of the physical link state
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_vf *vf;
+ struct ice_hw *hw;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ hw = &pf->hw;
+ ls = &pf->hw.port_info->phy.link_info;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+
+ /* Notify the VF of its new link state */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..10131e0180f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_PF_H_
+#define _ICE_VIRTCHNL_PF_H_
+#include "ice.h"
+
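+/* VLAN ID/priority mask and shift used when parsing a VSI's port VLAN
+ * (PVID), e.g. in ice_get_vf_cfg()
+ */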
+#define ICE_MAX_VLANID 4095
+#define ICE_VLAN_PRIORITY_S 12
+#define ICE_VLAN_M 0xFFF
+#define ICE_PRIORITY_M 0x7000
+
+/* Restrict number of MAC addresses and VLANs a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_MAX_MACADDR_PER_VF 12
+
+/* Malicious Driver Detection */
+#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
+#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0,
+ ICE_VF_STATE_ACTIVE,
+ ICE_VF_STATE_ENA,
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+	/* state to indicate that the PF needs to do vector assignment for
+	 * the VF. This needs to be set during first-time VF initialization,
+	 * or later when the VF asks for more vectors via a virtchnl OP.
+	 */
+ ICE_VF_STATE_CFG_INTR,
+ ICE_VF_STATES_NBITS
+};
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_L2 = 0,
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct ice_pf *pf;
+
+ s16 vf_id; /* VF id in the PF space */
+ u32 driver_caps; /* reported by VF driver */
+ int first_vector_idx; /* first vector index of this VF */
+ struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ struct virtchnl_ether_addr dflt_lan_addr;
+ u16 port_vlan_id;
+ u8 pf_set_mac; /* VF MAC address set by VMM admin */
+ u8 trusted;
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_num; /* ID as used by firmware */
+	u64 num_mdd_events;		/* number of MDD events detected */
+	u64 num_inval_msgs;		/* number of consecutive invalid msgs */
+	u64 num_valid_msgs;		/* number of valid msgs detected */
+	unsigned long vf_caps;		/* VF's advanced capabilities */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ u8 link_forced;
+ u8 link_up; /* only valid if VF link is forced */
+ u8 spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+
+int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+
+int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+#else /* CONFIG_PCI_IOV */
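+/* SR-IOV support is compiled out: the void notification helpers become
+ * no-op macros and the VF ndo-style handlers below return -EOPNOTSUPP.
+ */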
+#define ice_process_vflr_event(pf) do {} while (0)
+#define ice_free_vfs(pf) do {} while (0)
+#define ice_vc_process_vf_msg(pf, event) do {} while (0)
+#define ice_vc_notify_link_state(pf) do {} while (0)
+#define ice_vc_notify_reset(pf) do {} while (0)
+
+static inline bool
+ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+ bool __always_unused is_vflr)
+{
+ return true;
+}
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_PF_H_ */