Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 197
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 273
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 667
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 115
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 48
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 482
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 60
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 470
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.c | 126
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 185
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 117
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 117
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 444
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 48
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 135
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 35
39 files changed, 2930 insertions, 1040 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 0679907980f7..cddd82d4ca0f 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -34,7 +34,9 @@ ice-y := ice_main.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
- ice_tc_lib.o
+ ice_tc_lib.o \
+ ice_fwlog.o \
+ ice_debugfs.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
@@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
+ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cd7dcd0fa7f2..50304e4a4fb0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -360,6 +360,7 @@ struct ice_vsi {
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
+ u8 rss_hfunc; /* User configured hash type */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
@@ -570,6 +571,10 @@ struct ice_pf {
struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
+ struct dentry *ice_debugfs_pf;
+ struct dentry *ice_debugfs_pf_fwlog;
+ /* keep track of all the dentries for FW log modules */
+ struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -655,6 +660,7 @@ struct ice_pf {
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
+ struct device *hwmon_dev;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -888,6 +894,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
+void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_init(void);
+void ice_debugfs_exit(void);
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
+
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
@@ -919,6 +930,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
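The debugfs hooks declared above (ice_debugfs_init/ice_debugfs_exit) are driver-global, while ice_debugfs_fwlog_init() is per PF. The ice_main.c side of this series is not included in this excerpt; a minimal sketch of how the module init/exit path presumably wires them up, with the surrounding calls elided and the exact placement an assumption:

/* Sketch: create the driver-wide debugfs root before any PF probes and
 * remove it after the PCI driver is gone, so per-PF directories created by
 * ice_debugfs_fwlog_init() always have a valid parent.
 */
static int __init ice_module_init(void)
{
	ice_debugfs_init();

	/* ... existing workqueue setup and pci_register_driver() ... */

	return 0;
}

static void __exit ice_module_exit(void)
{
	/* ... pci_unregister_driver() and workqueue teardown ... */

	ice_debugfs_exit();
}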
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index d7fdb7ba7268..12c510bb1d9b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_SENSOR_READING 0x0067
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
@@ -491,10 +492,10 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U
u8 q_opt_tc;
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
@@ -1413,6 +1414,30 @@ struct ice_aqc_get_phy_rec_clk_out {
__le16 node_handle;
};
+/* Get sensor reading (direct 0x0632) */
+struct ice_aqc_get_sensor_reading {
+ u8 sensor;
+ u8 format;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get sensor reading response (direct 0x0632) */
+struct ice_aqc_get_sensor_reading_resp {
+ union {
+ u8 raw[8];
+ /* Output data for sensor 0x00, format 0x00 */
+ struct __packed {
+ s8 temp;
+ u8 temp_warning_threshold;
+ u8 temp_critical_threshold;
+ u8 temp_fatal_threshold;
+ u8 reserved[4];
+ } s0f0;
+ } data;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -2069,78 +2094,6 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
-/* Configure Firmware Logging Command (indirect 0xFF09)
- * Logging Information Read Response (indirect 0xFF10)
- * Note: The 0xFF10 command has no input parameters.
- */
-struct ice_aqc_fw_logging {
- u8 log_ctrl;
-#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
-#define ICE_AQC_FW_LOG_UART_EN BIT(1)
- u8 rsvd0;
- u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
-#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
-#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
- u8 rsvd1[5];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_NETPROXY,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Defines for both above FW logging command/response buffers */
-#define ICE_AQC_FW_LOG_ID_S 0
-#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
-
-#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
-#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
-
-#define ICE_AQC_FW_LOG_EN_S 12
-#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
-#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
-#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
-#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
-#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-
-/* Get/Clear FW Log (indirect 0xFF11) */
-struct ice_aqc_get_clear_fw_log {
- u8 flags;
-#define ICE_AQC_FW_LOG_CLEAR BIT(0)
-#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
- u8 rsvd1[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2403,6 +2356,84 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_XLR,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_SYNCE,
+ ICE_AQC_FW_LOG_ID_HEALTH,
+ ICE_AQC_FW_LOG_ID_TSDRV,
+ ICE_AQC_FW_LOG_ID_PFREG,
+ ICE_AQC_FW_LOG_ID_MDLVER,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+struct ice_aqc_fw_log {
+ u8 cmd_flags;
+#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
+#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
+
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ice_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -2443,6 +2474,8 @@ struct ice_aq_desc {
struct ice_aqc_restart_an restart_an;
struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
+ struct ice_aqc_get_sensor_reading get_sensor_reading;
+ struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2480,8 +2513,6 @@ struct ice_aq_desc {
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_fw_logging fw_logging;
- struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_cgu_input_config set_cgu_input_config;
struct ice_aqc_get_cgu_input_config get_cgu_input_config;
@@ -2493,6 +2524,7 @@ struct ice_aq_desc {
struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_fw_log fw_log;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2618,6 +2650,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
+ ice_aqc_opc_get_sensor_reading = 0x0632,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2690,9 +2723,11 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
- /* debug commands */
- ice_aqc_opc_fw_logging = 0xFF09,
- ice_aqc_opc_fw_logging_info = 0xFF10,
+ /* FW Logging Commands */
+ ice_aqc_opc_fw_logs_config = 0xFF30,
+ ice_aqc_opc_fw_logs_register = 0xFF31,
+ ice_aqc_opc_fw_logs_query = 0xFF32,
+ ice_aqc_opc_fw_logs_event = 0xFF33,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
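The 0x0632 descriptor pair added above is driven by ice_aq_get_sensor_reading(), introduced in ice_common.c later in this patch. The actual consumer (ice_hwmon.c) is not part of this excerpt; a minimal sketch of a caller, assuming sensor 0x00/format 0x00 reports whole degrees Celsius as the s0f0 layout suggests:

/* Sketch only: read the internal temperature sensor via AQ 0x0632 and
 * convert the signed Celsius reading to millidegrees for reporting.
 */
static int ice_read_internal_temp(struct ice_hw *hw, long *temp_mdegc)
{
	struct ice_aqc_get_sensor_reading_resp resp = {};
	int err;

	err = ice_aq_get_sensor_reading(hw, &resp);
	if (err)
		return err;

	*temp_mdegc = (long)resp.data.s0f0.temp * 1000;

	return 0;
}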
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7fa43827a3f0..edad5f9ab16c 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -189,10 +189,18 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ if (vsi->netdev)
+ netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
tx_ring->q_vector = NULL;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ if (vsi->netdev)
+ netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
rx_ring->q_vector = NULL;
+ }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
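The hunk above tears down the queue-to-NAPI association before dropping each ring's q_vector pointer. The matching registration happens elsewhere in this series (not shown here); a sketch of what that pairing looks like with the netdev queue API, assuming it runs once the NAPI instance is attached to the vector's rings:

/* Sketch: publish the ring<->NAPI mapping so netdev-genl queue queries can
 * report it; mirrors the NULL-ing done in ice_free_q_vector() above.
 */
static void ice_q_vector_set_napi_sketch(struct ice_vsi *vsi,
					 struct ice_q_vector *q_vector)
{
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	if (!vsi->netdev)
		return;

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
}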
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9a6c25f98632..08a1f699a34f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -934,216 +934,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
/**
- * ice_get_fw_log_cfg - get FW logging configuration
- * @hw: pointer to the HW struct
- */
-static int ice_get_fw_log_cfg(struct ice_hw *hw)
-{
- struct ice_aq_desc desc;
- __le16 *config;
- int status;
- u16 size;
-
- size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
- config = kzalloc(size, GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
-
- status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
- if (!status) {
- u16 i;
-
- /* Save FW logging information into the HW structure */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 v, m, flgs;
-
- v = le16_to_cpu(config[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
-
- if (m < ICE_AQC_FW_LOG_ID_MAX)
- hw->fw_log.evnts[m].cur = flgs;
- }
- }
-
- kfree(config);
-
- return status;
-}
-
-/**
- * ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the HW struct
- * @enable: enable certain FW logging events if true, disable all if false
- *
- * This function enables/disables the FW logging via Rx CQ events and a UART
- * port based on predetermined configurations. FW logging via the Rx CQ can be
- * enabled/disabled for individual PF's. However, FW logging via the UART can
- * only be enabled/disabled for all PFs on the same device.
- *
- * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
- * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
- * before initializing the device.
- *
- * When re/configuring FW logging, callers need to update the "cfg" elements of
- * the hw->fw_log.evnts array with the desired logging event configurations for
- * modules of interest. When disabling FW logging completely, the callers can
- * just pass false in the "enable" parameter. On completion, the function will
- * update the "cur" element of the hw->fw_log.evnts array with the resulting
- * logging event configurations of the modules that are being re/configured. FW
- * logging modules that are not part of a reconfiguration operation retain their
- * previous states.
- *
- * Before resetting the device, it is recommended that the driver disables FW
- * logging before shutting down the control queue. When disabling FW logging
- * ("enable" = false), the latest configurations of FW logging events stored in
- * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
- * a device reset.
- *
- * When enabling FW logging to emit log messages via the Rx CQ during the
- * device's initialization phase, a mechanism alternative to interrupt handlers
- * needs to be used to extract FW log messages from the Rx CQ periodically and
- * to prevent the Rx CQ from being full and stalling other types of control
- * messages from FW to SW. Interrupts are typically disabled during the device's
- * initialization phase.
- */
-static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
-{
- struct ice_aqc_fw_logging *cmd;
- u16 i, chgs = 0, len = 0;
- struct ice_aq_desc desc;
- __le16 *data = NULL;
- u8 actv_evnts = 0;
- void *buf = NULL;
- int status = 0;
-
- if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
- return 0;
-
- /* Disable FW logging only when the control queue is still responsive */
- if (!enable &&
- (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
- return 0;
-
- /* Get current FW log settings */
- status = ice_get_fw_log_cfg(hw);
- if (status)
- return status;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
- cmd = &desc.params.fw_logging;
-
- /* Indicate which controls are valid */
- if (hw->fw_log.cq_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
-
- if (enable) {
- /* Fill in an array of entries with FW logging modules and
- * logging events being reconfigured.
- */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 val;
-
- /* Keep track of enabled event types */
- actv_evnts |= hw->fw_log.evnts[i].cfg;
-
- if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
- continue;
-
- if (!data) {
- data = devm_kcalloc(ice_hw_to_dev(hw),
- ICE_AQC_FW_LOG_ID_MAX,
- sizeof(*data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- }
-
- val = i << ICE_AQC_FW_LOG_ID_S;
- val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data[chgs++] = cpu_to_le16(val);
- }
-
- /* Only enable FW logging if at least one module is specified.
- * If FW logging is currently enabled but all modules are not
- * enabled to emit log messages, disable FW logging altogether.
- */
- if (actv_evnts) {
- /* Leave if there is effectively no change */
- if (!chgs)
- goto out;
-
- if (hw->fw_log.cq_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
-
- buf = data;
- len = sizeof(*data) * chgs;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- }
- }
-
- status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
- if (!status) {
- /* Update the current configuration to reflect events enabled.
- * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
- * logging mode is enabled for the device. They do not reflect
- * actual modules being enabled to emit log messages. So, their
- * values remain unchanged even when all modules are disabled.
- */
- u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
-
- hw->fw_log.actv_evnts = actv_evnts;
- for (i = 0; i < cnt; i++) {
- u16 v, m;
-
- if (!enable) {
- /* When disabling all FW logging events as part
- * of device's de-initialization, the original
- * configurations are retained, and can be used
- * to reconfigure FW logging later if the device
- * is re-initialized.
- */
- hw->fw_log.evnts[i].cur = 0;
- continue;
- }
-
- v = le16_to_cpu(data[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
- }
- }
-
-out:
- devm_kfree(ice_hw_to_dev(hw), data);
-
- return status;
-}
-
-/**
- * ice_output_fw_log
- * @hw: pointer to the HW struct
- * @desc: pointer to the AQ message descriptor
- * @buf: pointer to the buffer accompanying the AQ message
- *
- * Formats a FW Log message and outputs it via the standard driver logs.
- */
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
-{
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
- le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
-}
-
-/**
* ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
@@ -1200,10 +990,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- /* Enable FW logging. Not fatal if this fails. */
- status = ice_cfg_fw_log(hw, true);
+ status = ice_fwlog_init(hw);
if (status)
- ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
+ status);
status = ice_clear_pf_cfg(hw);
if (status)
@@ -1354,8 +1144,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- /* Attempt to disable FW logging before shutting down control queues */
- ice_cfg_fw_log(hw, false);
+ ice_fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -2711,6 +2500,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
+ * enabled sensors.
+ */
+static void
+ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->supported_sensors = le32_to_cpu(cap->number);
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: supported sensors (bitmap) = 0x%x\n",
+ dev_p->supported_sensors);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_SENSOR_READING:
+ ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -5541,6 +5353,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
}
/**
+ * ice_aq_get_sensor_reading
+ * @hw: pointer to the HW struct
+ * @data: pointer to data to be read from the sensor
+ *
+ * Get sensor reading (0x0632)
+ */
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data)
+{
+ struct ice_aqc_get_sensor_reading *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
+ cmd = &desc.params.get_sensor_reading;
+#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
+#define ICE_INTERNAL_TEMP_SENSOR 0
+ cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
+ cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ memcpy(data, &desc.params.get_sensor_reading_resp,
+ sizeof(*data));
+
+ return status;
+}
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 31fdcac33986..3e933f75e948 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
+#include "ice.h"
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
@@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
@@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle);
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
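ice_parse_sensor_reading_cap() above stores a bitmap of supported sensors, and ice_aq_get_sensor_reading() exposes the read itself; the hwmon glue that consumes both lives in ice_hwmon.c, which is not shown in this excerpt. A sketch of the capability gate, where the bit index for the internal temperature sensor is a locally invented name rather than the driver's real define:

/* Sketch: only report temperature if the device advertised the internal
 * temperature sensor in the SENSOR_READING capability bitmap.
 */
#define ICE_INTERNAL_TEMP_SENSOR_BIT	0	/* assumed bit position */

static bool ice_has_internal_temp_sensor(struct ice_hw *hw)
{
	return hw->dev_caps.supported_sensors &
	       BIT(ICE_INTERNAL_TEMP_SENSOR_BIT);
}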
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
new file mode 100644
index 000000000000..c2bfba6b9ead
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include "ice.h"
+
+static struct dentry *ice_debugfs_root;
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
+ */
+static const char * const ice_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_fwlog_level
+ */
+static const char * const ice_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+/* the order in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_fwlog_level
+ */
+static const char * const ice_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+/**
+ * ice_fwlog_print_module_cfg - print current FW logging module configuration
+ * @hw: pointer to the HW structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
+{
+ struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
+ struct ice_fwlog_module_entry *entry;
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * ice_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int ice_debugfs_module_show(struct seq_file *s, void *v)
+{
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ struct ice_pf *pf;
+ int module;
+
+ dentry = file_dentry(filp);
+ pf = s->private;
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(ice_pf_to_dev(pf), "unknown module\n");
+ return -EINVAL;
+ }
+
+ ice_fwlog_print_module_cfg(&pf->hw, module, s);
+
+ return 0;
+}
+
+static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ice_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * ice_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = ice_pf_to_dev(pf);
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ ice_pf_fwlog_update_module(pf, log_level, module);
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
+ ice_pf_fwlog_update_module(pf, log_level, i);
+ }
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = ice_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = ice_debugfs_module_write,
+};
+
+/**
+ * ice_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ hw->fwlog_cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
+ ICE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ hw->fwlog_cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_nr_messages_read,
+ .write = ice_debugfs_nr_messages_write,
+};
+
+/**
+ * ice_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(hw->fwlog_cfg.options &
+ ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = ice_fwlog_register(hw);
+ else
+ ret = ice_fwlog_unregister(hw);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_enable_read,
+ .write = ice_debugfs_enable_write,
+};
+
+/**
+ * ice_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+ int index;
+
+ index = hw->fwlog_ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(ice_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ ice_fwlog_realloc_rings(hw, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_log_size_read,
+ .write = ice_debugfs_log_size_write,
+};
+
+/**
+ * ice_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ int data_copied = 0;
+ bool done = false;
+
+ if (ice_fwlog_ring_empty(&hw->fwlog_ring))
+ return 0;
+
+ while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
+ struct ice_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+
+ return data_copied;
+}
+
+/**
+ * ice_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_data_read,
+ .write = ice_debugfs_data_write,
+};
+
+/**
+ * ice_debugfs_fwlog_init - setup the debugfs directory
+ * @pf: the ice that is starting up
+ */
+void ice_debugfs_fwlog_init(struct ice_pf *pf)
+{
+ const char *name = pci_name(pf->pdev);
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* only support fw log commands on PF 0 */
+ if (pf->hw.bus.func)
+ return;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
+ if (IS_ERR(pf->ice_debugfs_pf))
+ goto err_create_module_files;
+
+ pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
+ pf->ice_debugfs_pf);
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules",
+ pf->ice_debugfs_pf_fwlog);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
+ 0600, fw_modules_dir, pf,
+ &ice_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600,
+ pf->ice_debugfs_pf_fwlog, pf,
+ &ice_debugfs_nr_messages_fops);
+
+ pf->ice_debugfs_pf_fwlog_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
+ kfree(fw_modules);
+}
+
+/**
+ * ice_debugfs_init - create root directory for debugfs entries
+ */
+void ice_debugfs_init(void)
+{
+ ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(ice_debugfs_root))
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * ice_debugfs_exit - remove debugfs entries
+ */
+void ice_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ice_debugfs_root);
+ ice_debugfs_root = NULL;
+}
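The 'data' read handler above leans on ring helpers declared in ice_fwlog.h, which is not part of this excerpt. From the way head, tail and size are used here, they presumably amount to something like the following (a sketch, not the actual ice_fwlog implementation; the power-of-two ring size is an assumption that keeps the wrap cheap):

/* Sketch of the ring helpers implied by ice_debugfs_data_read(): the ring
 * is empty when head == tail, and indices wrap at 'size'.
 */
static bool ice_fwlog_ring_empty_sketch(struct ice_fwlog_ring *rings)
{
	return rings->head == rings->tail;
}

static void ice_fwlog_ring_increment_sketch(u16 *item, u16 size)
{
	*item = (*item + 1) & (size - 1);
}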
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index f4e24d11ebd0..65be56f2af9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}
+static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ u32 id, cfg_ver, fw_ver;
+
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
+}
+
+static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
@@ -235,6 +253,8 @@ static const struct ice_devlink_version {
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
+ fixed("cgu.id", ice_info_cgu_id),
+ running("fw.cgu", ice_info_cgu_fw_build),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 86b180cb32a0..b9c5eced6326 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -513,31 +513,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
- * ice_dpll_mode_supported - check if dpll's working mode is supported
- * @dpll: registered dpll pointer
- * @dpll_priv: private data pointer passed on dpll registration
- * @mode: mode to be checked for support
- * @extack: error reporting
- *
- * Dpll subsystem callback. Provides information if working mode is supported
- * by dpll.
- *
- * Return:
- * * true - mode is supported
- * * false - mode is not supported
- */
-static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
- void *dpll_priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- if (mode == DPLL_MODE_AUTOMATIC)
- return true;
-
- return false;
-}
-
-/**
* ice_dpll_mode_get - get dpll's working mode
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
@@ -1197,7 +1172,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
static const struct dpll_device_ops ice_dpll_ops = {
.lock_status_get = ice_dpll_lock_status_get,
- .mode_supported = ice_dpll_mode_supported,
.mode_get = ice_dpll_mode_get,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a34083567e6f..47ab37ba62d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1142,8 +1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
return;
@@ -1162,8 +1161,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_pf_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
@@ -1179,8 +1177,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_priv_flags[i].name);
+ ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
@@ -2505,27 +2502,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
-#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
-#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
-#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
-#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
-
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
+ * @symm: true if Symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2594,9 +2579,11 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
+ struct ice_rss_hash_cfg cfg;
struct device *dev;
u64 hashed_flds;
int status;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2606,7 +2593,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- hashed_flds = ice_parse_hash_flds(nfc);
+ symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ hashed_flds = ice_parse_hash_flds(nfc, symm);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
@@ -2620,7 +2608,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ cfg.hash_flds = hashed_flds;
+ cfg.addl_hdrs = hdrs;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ cfg.symm = symm;
+
+ status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
@@ -2641,6 +2634,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2659,7 +2653,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return;
}
- hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
@@ -3198,11 +3192,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
+/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ *
+ * Reads the indirection table directly from the hardware.
+ */
static int
-ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
@@ -3233,17 +3234,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
vsi = vsi->tc_map_vsi[rss_context];
}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- if (!indir)
+ if (!rxfh->indir)
return 0;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, key);
+ err = ice_get_rss_key(vsi, rxfh->key);
if (err)
goto out;
@@ -3253,12 +3255,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
if (ice_is_adq_active(pf)) {
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = offset + lut[i] % qcount;
+ rxfh->indir[i] = offset + lut[i] % qcount;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = lut[i];
+ rxfh->indir[i] = lut[i];
out:
kfree(lut);
@@ -3266,42 +3268,31 @@ out:
}
/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
-static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
-{
- return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
-ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3315,7 +3306,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EOPNOTSUPP;
}
- if (key) {
+ /* Update the VSI's hash function */
+ if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ err = ice_set_rss_hfunc(vsi, hfunc);
+ if (err)
+ return err;
+
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
@@ -3323,7 +3322,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key,
+ ICE_VSIQF_HKEY_ARRAY_SIZE);
err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
if (err)
@@ -3338,11 +3338,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
@@ -4220,9 +4220,11 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
+ .cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -4253,7 +4255,6 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
- .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
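ice_set_rxfh() above turns RXH_XFRM_SYM_XOR into a symmetric-Toeplitz VSI hash scheme through ice_set_rss_hfunc(), whose definition (ice_lib.c) is not in this excerpt. Given the ICE_AQ_VSI_Q_OPT_RSS_HASH_* values introduced in ice_adminq_cmd.h, it presumably reduces to an update-VSI call along these lines; helpers and masks not visible in this diff are taken from the existing driver and should be treated as assumptions:

/* Sketch: program the VSI queueing-option hash scheme (plain vs. symmetric
 * Toeplitz) and cache the choice so ice_get_rxfh() can report it back.
 */
static int ice_set_rss_hfunc_sketch(struct ice_vsi *vsi, u8 hfunc)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctx;
	int err;

	if (hfunc == vsi->rss_hfunc)
		return 0;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	ctx->info.q_opt_rss = vsi->info.q_opt_rss &
			      ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);

	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
	if (!err) {
		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
		vsi->rss_hfunc = hfunc;
	}

	kfree(ctx);
	return err;
}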
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d151e5bacfec..54e4536219aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
continue;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
-
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ u64 prof_id = prof->prof_id[tun];
for (i = 0; i < prof->cnt; i++) {
if (prof->vsi_h[i] != vsi_idx)
@@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
return;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
+ u64 prof_id = prof->prof_id[tun];
int j;
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
for (j = 0; j < prof->cnt; j++) {
u16 vsi_num;
@@ -439,14 +436,12 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
struct ice_flow_prof *hw_prof;
struct ice_fd_hw_prof *prof;
- u64 prof_id;
int j;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
prof->fdir_seg[tun], TNL_SEG_CNT(tun),
- &hw_prof);
+ false, &hw_prof);
for (j = 0; j < prof->cnt; j++) {
enum ice_flow_priority prio;
u64 entry_h = 0;
@@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
prio = ICE_FLOW_PRIO_NORMAL;
err = ice_flow_add_entry(hw, ICE_BLK_FD,
- prof_id,
+ hw_prof->id,
prof->vsi_h[0],
prof->vsi_h[j],
prio, prof->fdir_seg,
@@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
flow);
continue;
}
+ prof->prof_id[tun] = hw_prof->id;
prof->entry_h[j][tun] = entry_h;
}
}
@@ -638,7 +634,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
u64 entry1_h = 0;
u64 entry2_h = 0;
bool del_last;
- u64 prof_id;
int err;
int idx;
@@ -668,7 +663,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* then return error.
*/
if (hw->fdir_fltr_cnt[flow]) {
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return -EINVAL;
}
@@ -686,23 +681,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* That is the final parameters are 1 header (segment), no
* actions (NULL) and zero actions 0.
*/
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ TNL_SEG_CNT(tun), false, &prof);
if (err)
return err;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (err)
goto err_prof;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (err)
goto err_entry;
hw_prof->fdir_seg[tun] = seg;
+ hw_prof->prof_id[tun] = prof->id;
hw_prof->entry_h[0][tun] = entry1_h;
hw_prof->entry_h[1][tun] = entry2_h;
hw_prof->vsi_h[0] = main_vsi->idx;
@@ -719,7 +714,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
entry1_h = 0;
vsi_h = main_vsi->tc_map_vsi[idx]->idx;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
main_vsi->idx, vsi_h,
ICE_FLOW_PRIO_NORMAL, seg,
&entry1_h);
@@ -756,7 +751,7 @@ err_unroll:
if (!hw_prof->entry_h[idx][tun])
continue;
- ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
hw_prof->entry_h[idx][tun] = 0;
if (del_last)
@@ -766,11 +761,11 @@ err_unroll:
hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return err;
}
@@ -1853,6 +1848,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
struct ice_pf *pf;
struct ice_hw *hw;
int fltrs_needed;
+ u32 max_location;
u16 tunnel_port;
int ret;
@@ -1884,8 +1880,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
if (ret)
return ret;
- if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ max_location = ice_get_fdir_cnt_all(hw);
+ if (fsp->location >= max_location) {
+		dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceeds max %d.\n",
+ max_location);
return -ENOSPC;
}
@@ -1893,7 +1891,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
return -ENOSPC;
}
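Reviewer note: the flow-director hunks stop deriving the profile ID from the old formula (flow + tun * ICE_FLTR_PTYPE_MAX) and instead record the ID that the flow layer allocated (hw_prof->prof_id[tun] = prof->id), so later entry additions and teardown reference the same handle. A tiny standalone sketch of that "cache the allocated ID" pattern; every name below is hypothetical and only illustrates the bookkeeping:

/* Sketch of caching an allocator-assigned ID instead of recomputing it. */
#include <stdint.h>
#include <stdio.h>

#define SEG_MAX 2	/* mirrors the per-tunnel dimension */

struct toy_hw_prof {
	uint64_t prof_id[SEG_MAX];	/* ID handed back by the allocator */
	int valid[SEG_MAX];
};

static uint64_t next_id;

/* stand-in allocator: callers must keep the returned ID */
static uint64_t toy_add_prof(void)
{
	return ++next_id;
}

int main(void)
{
	struct toy_hw_prof prof = { 0 };
	int tun;

	for (tun = 0; tun < SEG_MAX; tun++) {
		prof.prof_id[tun] = toy_add_prof();	/* cache at add time */
		prof.valid[tun] = 1;
	}

	/* teardown uses the cached IDs rather than re-deriving them */
	for (tun = 0; tun < SEG_MAX; tun++)
		if (prof.valid[tun])
			printf("remove profile %llu\n",
			       (unsigned long long)prof.prof_id[tun]);
	return 0;
}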
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5ce413965930..85c57ec315c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
+ * @symm: symmetric setting for RSS flows
* @prof_id: receives the profile ID
*/
static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
+ struct ice_fv_word *fv, u16 *masks, bool symm,
+ u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u8 i;
@@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
+ if (blk == ICE_BLK_RSS && es->symm[i] != symm)
+ continue;
+
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
@@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
}
/**
- * ice_write_es - write an extraction sequence to hardware
+ * ice_write_es - write an extraction sequence and symmetric setting to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
* @prof_id: the profile ID to write
* @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ * @symm: symmetric setting for RSS profiles
*/
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
- struct ice_fv_word *fv)
+ struct ice_fv_word *fv, bool symm)
{
u16 off;
@@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
memcpy(&hw->blk[blk].es.t[off], fv,
hw->blk[blk].es.fvw * sizeof(*fv));
}
+
+ if (blk == ICE_BLK_RSS)
+ hw->blk[blk].es.symm[prof_id] = symm;
}
/**
@@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
- ice_write_es(hw, blk, prof_id, NULL);
+ ice_write_es(hw, blk, prof_id, NULL, false);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
@@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
+ memset(es->symm, 0, es->count * sizeof(*es->symm));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
+
+ memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
}
}
@@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw)
if (!es->ref_count)
goto err;
+ es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->symm), GFP_KERNEL);
+ if (!es->symm)
+ goto err;
+
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
@@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
+
+ prof_id->count = blk_sizes[i].prof_id;
+ prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
+ sizeof(*prof_id->id), GFP_KERNEL);
+ if (!prof_id->id)
+ goto err;
}
return 0;
@@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
+ * @symm: symmetric setting for RSS profiles
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks)
+ struct ice_fv_word *es, u16 *masks, bool symm)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
goto err_ice_add_prof;
/* and write new es */
- ice_write_es(hw, blk, prof_id, es);
+ ice_write_es(hw, blk, prof_id, es, symm);
}
ice_prof_inc_ref(hw, blk, prof_id);
@@ -3097,7 +3125,7 @@ err_ice_add_prof:
* This will search for a profile tracking ID which was previously added.
* The profile map lock should be held before calling this function.
*/
-static struct ice_prof_map *
+struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
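Reviewer note: in ice_find_prof_id_with_mask() above, an existing extraction sequence is only reused for the RSS block when its stored symmetric flag (es->symm[i]) matches the requested one, so symmetric and asymmetric variants of an otherwise identical field vector get distinct hardware profiles. A reduced standalone model of that "match contents plus attribute" search (illustrative types and sizes only):

/* Reduced model: reuse a table entry only when both the contents and an
 * attribute flag ("symm") match. Names and sizes are illustrative. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FV_WORDS 4
#define PROF_CNT 8

struct toy_es {
	int t[PROF_CNT][FV_WORDS];	/* extraction sequences */
	bool symm[PROF_CNT];		/* per-profile symmetric flag */
	int used;
};

static int find_prof(const struct toy_es *es, const int *fv, bool symm)
{
	int i;

	for (i = 0; i < es->used; i++) {
		if (es->symm[i] != symm)
			continue;	/* same FV, different attribute: no reuse */
		if (!memcmp(es->t[i], fv, sizeof(es->t[i])))
			return i;
	}
	return -1;
}

int main(void)
{
	struct toy_es es = { .used = 1 };
	int fv[FV_WORDS] = { 1, 2, 3, 4 };

	memcpy(es.t[0], fv, sizeof(fv));
	es.symm[0] = false;

	printf("asymm lookup: %d\n", find_prof(&es, fv, false));	/* 0 */
	printf("symm lookup:  %d\n", find_prof(&es, fv, true));	/* -1 */
	return 0;
}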
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 7af7c8e9aa4e..b39d7cdc381f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks);
+ struct ice_fv_word *es, u16 *masks, bool symm);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 4f42e14ed3ae..d427a79d001a 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -146,6 +146,7 @@ struct ice_es {
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
+ u8 *symm; /* symmetric setting per profile (RSS blk)*/
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
@@ -304,10 +305,16 @@ struct ice_masks {
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
+struct ice_prof_id {
+ unsigned long *id;
+ int count;
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
+ struct ice_prof_id prof_id;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
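Reviewer note: struct ice_prof_id added above is simply a per-block bitmap of in-use profile IDs. It is allocated in ice_init_hw_tbls(), ice_flow_add_prof_sync() picks the first clear bit and sets it on success, and ice_flow_rem_prof_sync() clears it again, replacing the old bit-packed 64-bit profile IDs. A standalone sketch of that allocate/free discipline, using plain C bit operations in place of the kernel's bitmap helpers:

/* Minimal model of bitmap-based ID allocation as used for prof_id. */
#include <stdint.h>
#include <stdio.h>

#define ID_COUNT 64

static uint64_t used;	/* one bit per profile ID */

static int alloc_id(void)
{
	int id;

	for (id = 0; id < ID_COUNT; id++)
		if (!(used & (1ULL << id))) {	/* first clear bit */
			used |= 1ULL << id;
			return id;
		}
	return -1;	/* the driver returns -ENOSPC here */
}

static void free_id(int id)
{
	used &= ~(1ULL << id);
}

int main(void)
{
	int a = alloc_id();	/* 0 */
	int b = alloc_id();	/* 1 */

	free_id(a);
	printf("a=%d b=%d reused=%d\n", a, b, alloc_id());	/* reused=0 */
	return 0;
}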
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fb8b925aaf8b..fc2b58f56279 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+#define ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008
/**
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
@@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
* @dir: flow direction
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
*/
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
- u8 segs_cnt, u16 vsi_handle, u32 conds)
+ u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
{
struct ice_flow_prof *p, *prof = NULL;
@@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
!test_bit(vsi_handle, p->vsis))
continue;
+ /* Check for symmetric settings */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
+ p->symm != symm)
+ continue;
+
/* Protocol headers must be checked. Matched fields are
* checked if specified.
*/
@@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*
* Assumption: the caller has acquired the lock to the profile list
*/
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
- enum ice_flow_dir dir, u64 prof_id,
+ enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ bool symm, struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
+ struct ice_prof_id *ids;
int status;
+ u64 prof_id;
u8 i;
if (!prof)
return -EINVAL;
+ ids = &hw->blk[blk].prof_id;
+ prof_id = find_first_zero_bit(ids->id, ids->count);
+ if (prof_id >= ids->count)
+ return -ENOSPC;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
params->prof->id = prof_id;
params->prof->dir = dir;
params->prof->segs_cnt = segs_cnt;
+ params->prof->symm = symm;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
@@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask);
+ params->mask, symm);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
INIT_LIST_HEAD(&params->prof->entries);
mutex_init(&params->prof->entries_lock);
+ set_bit(prof_id, ids->id);
*prof = params->prof;
out:
@@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
+ clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
@@ -1511,15 +1528,15 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*/
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof)
{
int status;
@@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
mutex_lock(&hw->fl_profs_locks[blk]);
- status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
- prof);
+ status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
+ symm, prof);
if (!status)
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
@@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
- * @hash_fields: fields to be hashed on for the segment(s)
- * @flow_hdr: protocol header fields within a packet segment
+ * @seg_cnt: segment count
+ * @cfg: configure parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static int
-ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
- u32 flow_hdr)
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
- for_each_set_bit(i, (unsigned long *)&hash_fields,
- ICE_FLOW_FIELD_IDX_MAX)
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ /* set inner most segment */
+ seg = &segs[seg_cnt - 1];
+
+ for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
- ICE_FLOW_SET_HDRS(segs, flow_hdr);
+ ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
+
+ /* set outer most header */
+ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
+ else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return -EINVAL;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
@@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_get_rss_hdr_type - get an RSS profile's header type
+ * @prof: RSS flow profile
+ */
+static enum ice_rss_cfg_hdr_type
+ice_get_rss_hdr_type(struct ice_flow_prof *prof)
+{
+ if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
+ return ICE_RSS_OUTER_HEADERS;
+ } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
+ const struct ice_flow_seg_info *s;
+
+ s = &prof->segs[ICE_RSS_OUTER_HEADERS];
+ if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
+ return ICE_RSS_INNER_HEADERS;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ }
+
+ return ICE_RSS_ANY_HEADERS;
+}
+
+static bool
+ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
+ enum ice_rss_cfg_hdr_type hdr_type)
+{
+ return (r->hash.hdr_type == hdr_type &&
+ r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
+}
+
+/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
clear_bit(vsi_handle, r->vsis);
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
list_del(&r->l_entry);
@@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
set_bit(vsi_handle, r->vsis);
return 0;
}
@@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return -ENOMEM;
- rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
- rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hdr_type = hdr_type;
+ rss_cfg->hash.symm = prof->symm;
set_bit(vsi_handle, rss_cfg->vsis);
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
@@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
return 0;
}
-#define ICE_FLOW_PROF_HASH_S 0
-#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
-#define ICE_FLOW_PROF_HDR_S 32
-#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
-#define ICE_FLOW_PROF_ENCAP_S 63
-#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+/**
+ * ice_rss_config_xor_word - set the HSYMM registers for one input set word
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ *
+ * Write to the HSYMM register with the index of @src FV the value of the @dst
+ * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst]
+ * while calculating the RSS input set.
+ */
+static void
+ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
+{
+ u32 val, reg, bits_shift;
+ u8 reg_idx;
+
+ reg_idx = src / GLQF_HSYMM_REG_SIZE;
+ bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
+ val = dst | GLQF_HSYMM_ENABLE_BIT;
+
+ reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
+ reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
+ wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
+}
-#define ICE_RSS_OUTER_HEADERS 1
-#define ICE_RSS_INNER_HEADERS 2
+/**
+ * ice_rss_config_xor - set the symmetric registers for a profile's protocol
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ * @len: length of the source/destination fields in words
+ */
+static void
+ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
+{
+ int fv_last_word =
+ ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ice_rss_config_xor_word(hw, prof_id,
+ /* Yes, field vector in GLQF_HSYMM and
+					 * GLQF_HINSET is reversed!
+ */
+ fv_last_word - (src + i),
+ fv_last_word - (dst + i));
+ ice_rss_config_xor_word(hw, prof_id,
+ fv_last_word - (dst + i),
+ fv_last_word - (src + i));
+ }
+}
-/* Flow profile ID format:
- * [0:31] - Packet match fields
- * [32:62] - Protocol header
- * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+/**
+ * ice_rss_set_symm - set the symmetric settings for an RSS profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * The symmetric hash will result from XORing the protocol's fields with
+ * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's
+ * GLQF_HSYMM registers.
*/
-#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
- ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
+static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+
+ if (!map)
+ return;
+
+ /* clear to default */
+ for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (prof->symm) {
+ struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
+ struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
+ struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
+ struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
+ struct ice_flow_seg_xtrct *udp_src, *udp_dst;
+ struct ice_flow_seg_info *seg;
+
+ seg = &prof->segs[prof->segs_cnt - 1];
+
+ ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
+ ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
+
+ ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
+ ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
+
+ tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
+ tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
+
+ udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
+ udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
+
+ sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
+ sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
+
+ /* xor IPv4 */
+ if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv4_src->idx, ipv4_dst->idx, 2);
+
+ /* xor IPv6 */
+ if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv6_src->idx, ipv6_dst->idx, 8);
+
+ /* xor TCP */
+ if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ tcp_src->idx, tcp_dst->idx, 1);
+
+ /* xor UDP */
+ if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ udp_src->idx, udp_dst->idx, 1);
+
+ /* xor SCTP */
+ if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ sctp_src->idx, sctp_dst->idx, 1);
+ }
+}
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
- * @segs_cnt: packet segment count
+ * @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
+ u8 segs_cnt;
int status;
- if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return -EINVAL;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
- /* Search for a flow profile that has matching headers, hash fields
- * and has the input VSI associated to it. If found, no further
+ /* Search for a flow profile that has matching headers, hash fields,
+ * symm and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof)
goto exit;
@@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* the protocol header and new hash field configuration.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (!status)
@@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
}
}
- /* Search for a profile that has same match fields only. If this
- * exists then associate the VSI to this profile.
+ /* Search for a profile that has the same match fields and symmetric
+ * setting. If this exists then associate the VSI to this profile.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
@@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
goto exit;
}
- /* Create a new flow profile with generated profile and packet
- * segment information.
- */
+ /* Create a new flow profile with packet segment information. */
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
- ICE_FLOW_GEN_PROFID(hashed_flds,
- segs[segs_cnt - 1].hdrs,
- segs_cnt),
- segs, segs_cnt, &prof);
+ segs, segs_cnt, cfg->symm, &prof);
if (status)
goto exit;
+ prof->symm = cfg->symm;
+ ice_rss_set_symm(hw, prof);
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* If association to a new flow profile failed then this profile can
* be removed.
@@ -2146,30 +2323,43 @@ exit:
/**
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
+ * @vsi: VSI to add the RSS configuration to
+ * @cfg: configure parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
+ u16 vsi_handle;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
- * @segs_cnt: packet segment count
+ * @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
+ u8 segs_cnt;
int status;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = -ENOENT;
@@ -2233,31 +2423,39 @@ out:
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
+ * @cfg: configure parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find entry associated with input VSI to be
- * removed. Calls are made to underlying flow s which will APIs
+ * removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
-int __maybe_unused
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+int
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
+ * @vsi: VF's VSI
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
{
+ struct ice_rss_hash_cfg hcfg;
+ u16 vsi_handle;
int status = 0;
u64 hash_flds;
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
@@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return -EIO;
- status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
- ICE_FLOW_SEG_HDR_NONE);
+ hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ hcfg.hash_flds = rss_hash;
+ hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ hcfg.symm = false;
+ status = ice_add_rss_cfg(hw, vsi, &hcfg);
if (status)
break;
}
@@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
return status;
}
+static bool rss_cfg_symm_valid(u64 hfld)
+{
+ return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
+}
+
+/**
+ * ice_set_rss_cfg_symm - set symmetry for all of a VSI's RSS configurations
+ * @hw: pointer to the hardware structure
+ * @vsi: VSI to set/unset Symmetric RSS
+ * @symm: TRUE to set Symmetric RSS hashing
+ */
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
+{
+ struct ice_rss_hash_cfg local;
+ struct ice_rss_cfg *r, *tmp;
+ u16 vsi_handle = vsi->idx;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
+ local = r->hash;
+ local.symm = symm;
+ if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
+ continue;
+
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
/**
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
* @hw: pointer to the hardware structure
@@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis)) {
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_OUTER_HEADERS);
- if (status)
- break;
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_INNER_HEADERS);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hdrs: protocol header type
+ * @symm: whether the RSS is symmetric (bool, output)
*
* This function will return the match fields of the first instance of flow
* profile having the given header types and containing input VSI
*/
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
{
u64 rss_hash = ICE_HASH_INVALID;
struct ice_rss_cfg *r;
@@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
- r->packet_hdr == hdrs) {
- rss_hash = r->hashed_flds;
+ r->hash.addl_hdrs == hdrs) {
+ rss_hash = r->hash.hash_flds;
+ *symm = r->hash.symm;
break;
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 96923ef0a5a8..ff82915ab497 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -34,6 +34,8 @@
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_FLOW_HASH_GTP_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -227,6 +229,19 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_MAX
};
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
@@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+enum ice_rss_cfg_hdr_type {
+	ICE_RSS_OUTER_HEADERS, /* take outer headers as input set. */
+	ICE_RSS_INNER_HEADERS, /* take inner headers as input set. */
+	/* take inner headers as input set for packet with outer IPv4. */
+	ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
+	/* take inner headers as input set for packet with outer IPv6. */
+	ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
+	/* take outer headers first then inner headers as input set */
+ ICE_RSS_ANY_HEADERS
+};
+
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs; /* protocol header fields */
+ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
+ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
+ bool symm; /* symmetric or asymmetric hash */
+};
+
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
@@ -289,8 +322,10 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
+#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -372,20 +407,21 @@ struct ice_flow_prof {
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+
+ bool symm; /* Symmetric Hash for RSS */
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
- u64 hashed_flds;
- u32 packet_hdr;
+ struct ice_rss_hash_cfg hash;
};
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof);
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
@@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-int
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg);
+int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */
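Reviewer note: with the new interface a caller describes an RSS rule with a single struct ice_rss_hash_cfg instead of passing hash fields and headers separately; the AVF path above (hcfg.addl_hdrs/hash_flds/hdr_type/symm) shows the pattern. A hedged fragment, not from the patch, of how a caller might fill the structure for symmetric TCP-over-IPv4 hashing; it assumes driver context (a valid hw/vsi pair and the existing ICE_FLOW_SEG_HDR_TCP/IPV4 header flags) and the function name is made up:

/* Hedged sketch only: filling the new ice_rss_hash_cfg for a symmetric
 * TCP/IPv4 hash. Assumes driver context; not part of this patch. */
static int example_cfg_sym_tcp4(struct ice_hw *hw, struct ice_vsi *vsi)
{
	struct ice_rss_hash_cfg cfg = {
		.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
		.hash_flds = ICE_HASH_TCP_IPV4,
		.hdr_type  = ICE_RSS_ANY_HEADERS,	/* outer then inner */
		.symm      = true,			/* symmetric Toeplitz */
	};

	return ice_add_rss_cfg(hw, vsi, &cfg);
}

Note that ICE_HASH_TCP_IPV4 covers both address and both port fields, so it also passes the rss_cfg_symm_valid() src/dst pairing check added in ice_flow.c.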
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
new file mode 100644
index 000000000000..92b5dac481cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+#include "ice.h"
+#include "ice_common.h"
+#include "ice_fwlog.h"
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+void ice_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
+
+static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = ICE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += ICE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * ice_fwlog_realloc_rings - reallocate the FW log rings
+ * @hw: pointer to the HW structure
+ * @index: the new index to use to allocate memory for the log data
+ *
+ */
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
+{
+ struct ice_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
+ if (ring_size == hw->fwlog_ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = ice_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+
+ hw->fwlog_ring.rings = ring.rings;
+ hw->fwlog_ring.size = ring.size;
+ hw->fwlog_ring.index = index;
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+}
+
+/**
+ * ice_fwlog_init - Initialize FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called on driver initialization during
+ * ice_init_hw().
+ */
+int ice_fwlog_init(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ ice_fwlog_set_supported(hw);
+
+ if (ice_fwlog_supported(hw)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = ice_fwlog_get(hw, &hw->fwlog_cfg);
+ if (status)
+ return status;
+
+ hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*hw->fwlog_ring.rings),
+ GFP_KERNEL);
+ if (!hw->fwlog_ring.rings) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
+ hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ return status;
+ }
+
+ ice_debugfs_fwlog_init(hw->back);
+ } else {
+ dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fwlog_deinit - unroll FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called in ice_deinit_hw().
+ */
+void ice_fwlog_deinit(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ int status;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+ status = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(pf->ice_debugfs_pf_fwlog_modules);
+
+ pf->ice_debugfs_pf_fwlog_modules = NULL;
+
+ status = ice_fwlog_unregister(hw);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (hw->fwlog_ring.rings) {
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ }
+}
+
+/**
+ * ice_fwlog_supported - report whether FW supports FW logging (cached value)
+ * @hw: pointer to the HW structure
+ *
+ * This will always return false if called before ice_init_hw(), so it must be
+ * called after ice_init_hw().
+ */
+bool ice_fwlog_supported(struct ice_hw *hw)
+{
+ return hw->fwlog_supported;
+}
+
+/**
+ * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @hw: pointer to the HW structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from ice_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
+ u16 num_entries, u16 options, u16 log_resolution)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & ICE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & ICE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = ice_aq_send_cmd(hw, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries,
+ NULL);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set - Set the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * ice_fwlog_register. Note that ice_fwlog_register does not need to be called
+ * for init.
+ */
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_set(hw, cfg->module_entries,
+ ICE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @hw: pointer to the HW structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ ICE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_fwlog_get - Get the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config to populate based on current firmware logging settings
+ */
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_get(hw, cfg);
+}
+
+/**
+ * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @hw: pointer to the HW structure
+ * @reg: true to register and false to unregister
+ */
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
+
+ if (reg)
+ desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_fwlog_register - Register the PF for firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in ice_fwlog_set.
+ */
+int ice_fwlog_register(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_unregister - Unregister the PF from firmware logging
+ * @hw: pointer to the HW structure
+ */
+int ice_fwlog_unregister(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, false);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set_supported - Set if FW logging is supported by FW
+ * @hw: pointer to the HW struct
+ *
+ * If FW returns success to the ice_aq_fwlog_get call, then it supports FW
+ * logging; otherwise it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine
+ * whether the FW supports FW logging.
+ */
+void ice_fwlog_set_supported(struct ice_hw *hw)
+{
+ struct ice_fwlog_cfg *cfg;
+ int status;
+
+ hw->fwlog_supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ /* don't call ice_fwlog_get() because that would check to see if FW
+ * logging is supported which is what the driver is determining now
+ */
+ status = ice_aq_fwlog_get(hw, cfg);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ hw->fwlog_supported = true;
+
+ kfree(cfg);
+}
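
As a rough illustration of how the helpers above fit together (this is not part of the patch), a caller could configure logging and then register for events. The sequential module IDs and the log_resolution value are assumptions:

/* Hedged sketch: enable FW logging for every module over the ARQ using only
 * the ice_fwlog_* API added above. Module IDs are assumed to be sequential
 * and the log_resolution value is illustrative.
 */
static int example_enable_fwlog(struct ice_hw *hw)
{
	struct ice_fwlog_cfg cfg = {};
	int i, err;

	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
		cfg.module_entries[i].module_id = i;
		cfg.module_entries[i].log_level = ICE_FWLOG_LEVEL_NORMAL;
	}
	cfg.options = ICE_FWLOG_OPTION_ARQ_ENA;
	cfg.log_resolution = 10;

	err = ice_fwlog_set(hw, &cfg);
	if (err)
		return err;

	/* start receiving ice_aqc_opc_fw_logs_event messages on the ARQ */
	return ice_fwlog_register(hw);
}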
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
new file mode 100644
index 000000000000..287e71fa4b86
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_FWLOG_H_
+#define _ICE_FWLOG_H_
+#include "ice_adminq_cmd.h"
+
+struct ice_hw;
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except ICE_FWLOG_LEVEL_NONE)
+ */
+enum ice_fwlog_level {
+ ICE_FWLOG_LEVEL_NONE = 0,
+ ICE_FWLOG_LEVEL_ERROR = 1,
+ ICE_FWLOG_LEVEL_WARNING = 2,
+ ICE_FWLOG_LEVEL_NORMAL = 3,
+ ICE_FWLOG_LEVEL_VERBOSE = 4,
+ ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct ice_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ice_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ice_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ice_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct ice_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ice_fwlog_ring {
+ struct ice_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define ICE_FWLOG_RING_SIZE_DFLT 256
+#define ICE_FWLOG_RING_SIZE_MAX 512
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
+void ice_fwlog_ring_increment(u16 *item, u16 size);
+void ice_fwlog_set_supported(struct ice_hw *hw);
+bool ice_fwlog_supported(struct ice_hw *hw);
+int ice_fwlog_init(struct ice_hw *hw);
+void ice_fwlog_deinit(struct ice_hw *hw);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
+#endif /* _ICE_FWLOG_H_ */
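
The ring declared above is a plain head/tail buffer of ice_fwlog_data entries: the ARQ handler added later in this patch produces at tail, and a reader drains from head. As a sketch of the intended semantics only (the in-tree reader is not shown here), a consumer could look like:

/* Illustrative only: drain buffered FW log messages from head to tail and
 * hand each one to a caller-supplied callback.
 */
static void example_drain_fwlog_ring(struct ice_fwlog_ring *ring,
				     void (*emit)(const u8 *buf, u16 len))
{
	while (!ice_fwlog_ring_empty(ring)) {
		struct ice_fwlog_data *entry = &ring->rings[ring->head];

		emit(entry->data, entry->data_size);
		ice_fwlog_ring_increment(&ring->head, ring->size);
	}
}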
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 86936b758ade..e126dbbd7b2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -404,6 +404,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HSYMM_REG_SIZE 4
+#define GLQF_HSYMM_REG_PER_PROF 6
+#define GLQF_HSYMM_ENABLE_BIT BIT(7)
#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
new file mode 100644
index 000000000000..e4c2c1bff6c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_hwmon.h"
+#include "ice_adminq_cmd.h"
+
+#include <linux/hwmon.h>
+
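+/* hwmon expects temperatures in millidegrees Celsius; the firmware sensor
+ * reading is presumably reported in whole degrees, hence the scaling below.
+ */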
+#define TEMP_FROM_REG(reg) ((reg) * 1000)
+
+static const struct hwmon_channel_info *ice_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_EMERGENCY),
+ NULL
+};
+
+static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ice_aqc_get_sensor_reading_resp resp;
+ struct ice_pf *pf = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ ret = ice_aq_get_sensor_reading(&pf->hw, &resp);
+ if (ret) {
+ dev_warn_ratelimited(dev,
+ "%s HW read failure (%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp);
+ break;
+ case hwmon_temp_max:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold);
+ break;
+ case hwmon_temp_crit:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold);
+ break;
+ case hwmon_temp_emergency:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold);
+ break;
+ default:
+ dev_dbg(dev, "%s unsupported attribute (%d)\n",
+ __func__, attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t ice_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ case hwmon_temp_emergency:
+ return 0444;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops ice_hwmon_ops = {
+ .is_visible = ice_hwmon_is_visible,
+ .read = ice_hwmon_read
+};
+
+static const struct hwmon_chip_info ice_chip_info = {
+ .ops = &ice_hwmon_ops,
+ .info = ice_hwmon_info
+};
+
+static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+{
+ /* Only the first PF will report temperature for a chip.
+ * Note that internal temp reading is not supported
+ * for older FW (< v4.30).
+ */
+ if (pf->hw.pf_id)
+ return false;
+
+ unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+
+ return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+}
+
+void ice_hwmon_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct device *hdev;
+
+ if (!ice_is_internal_reading_supported(pf))
+ return;
+
+ hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info,
+ NULL);
+ if (IS_ERR(hdev)) {
+ dev_warn(dev,
+			 "hwmon_device_register_with_info returned an error (%ld)\n",
+ PTR_ERR(hdev));
+ return;
+ }
+ pf->hwmon_dev = hdev;
+}
+
+void ice_hwmon_exit(struct ice_pf *pf)
+{
+ if (!pf->hwmon_dev)
+ return;
+ hwmon_device_unregister(pf->hwmon_dev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h
new file mode 100644
index 000000000000..d66d40354f5a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_HWMON_H_
+#define _ICE_HWMON_H_
+
+#ifdef CONFIG_ICE_HWMON
+void ice_hwmon_init(struct ice_pf *pf);
+void ice_hwmon_exit(struct ice_pf *pf);
+#else /* CONFIG_ICE_HWMON */
+static inline void ice_hwmon_init(struct ice_pf *pf) { }
+static inline void ice_hwmon_exit(struct ice_pf *pf) { }
+#endif /* CONFIG_ICE_HWMON */
+
+#endif /* _ICE_HWMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index cd065ec48c87..280994ee5933 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -570,6 +570,50 @@ resume_traffic:
}
/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
* @oldport: lport of previous interface
@@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
- struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
- if (lag->upper_netdev) {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- }
-
- lag->netdev_head = &ndlist.node;
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
- lag->netdev_head = NULL;
+ ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to prevent lag events from
+ * being processed while out-of-sync moves are happening. Also, paired moves,
+ * such as those used in a reset flow, should both be called under the same
+ * mutex lock to avoid changes between the start and end of the reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
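
To illustrate the paired-move rule in the comment above (a sketch only, not code from this patch; the port arguments are placeholders), a reset-style caller would hold lag_mutex across both directions:

/* Hedged sketch of a paired move around a reconfiguration step. pf->lag and
 * pf->lag_mutex come from the existing driver; only
 * ice_lag_move_vf_nodes_cfg() is new in this patch.
 */
static void example_paired_move(struct ice_pf *pf, u8 prim_prt, u8 act_prt)
{
	mutex_lock(&pf->lag_mutex);

	/* pull Tx scheduling nodes back to the primary port... */
	ice_lag_move_vf_nodes_cfg(pf->lag, act_prt, prim_prt);

	/* ...do whatever work required the nodes on the primary... */

	/* ...then restore them to the active port under the same lock */
	ice_lag_move_vf_nodes_cfg(pf->lag, prim_prt, act_prt);

	mutex_unlock(&pf->lag_mutex);
}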
+
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
@@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
- struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- lag->netdev_head = &ndlist.node;
+ ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_clear_rdma_cap(pf);
lag_rebuild_out:
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
+ ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 9557e8605a07..ede833dfa658 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d826b5afa143..711e4fb62cb7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1191,12 +1191,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
dev_dbg(dev, "Unsupported VSI type %s\n",
@@ -1204,9 +1202,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
return;
}
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ vsi->rss_hfunc = hash_type;
+
+ ctxt->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}
static void
@@ -1605,12 +1606,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
}
+static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
+ /* configure RSS for IPv4 with input set IP src/dst */
+ {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp4 with input set IP src/dst - only support
+ * RSS on SCTPv4 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp6 with input set IPv6 src/dst - only support
+ * RSS on SCTPv6 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
+ {ICE_FLOW_SEG_HDR_ESP,
+ ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+};
+
/**
* ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
* @vsi: VSI to be configured
@@ -1624,11 +1657,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
*/
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
- u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ u16 vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct device *dev;
int status;
+ u32 i;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1636,67 +1670,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
vsi_num);
return;
}
- /* configure RSS for IPv4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for IPv6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
+ const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
- ICE_FLOW_SEG_HDR_ESP);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ status = ice_add_rss_cfg(hw, vsi, cfg);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
+ cfg->addl_hdrs, cfg->hash_flds,
+ cfg->hdr_type, cfg->symm);
+ }
}
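
For reference, each of the per-flow calls this loop replaces now amounts to building one struct ice_rss_hash_cfg and handing it to ice_add_rss_cfg() with the new VSI-based signature; a one-off caller would look roughly like this (sketch only, values mirror the tcp4 entry above):

/* Illustrative only: hash tcp4 flows on IP src/dst and TCP src/dst ports. */
static int example_add_tcp4_rss(struct ice_hw *hw, struct ice_vsi *vsi)
{
	struct ice_rss_hash_cfg cfg = {
		.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
		.hash_flds = ICE_HASH_TCP_IPV4,
		.hdr_type = ICE_RSS_ANY_HEADERS,
		.symm = false,
	};

	return ice_add_rss_cfg(hw, vsi, &cfg);
}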
/**
@@ -2452,6 +2434,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi, true);
+
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2932,6 +2918,71 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Set the napi instance for the queue
+ */
+static void
+ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
+{
+ if (!locked)
+ rtnl_lock();
+ netif_queue_set_napi(dev, queue_index, type, napi);
+ if (!locked)
+ rtnl_unlock();
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_vsi_set_napi_queues - Associate queue[s] with napi for all vectors
+ * @vsi: VSI pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate queue[s] with napi for all vectors
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+{
+ int i;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index f24f5d1e6f9c..71bd27244941 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -91,6 +91,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 43ba3e55b8c1..9b0c04d595ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,6 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
+#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
* ice driver.
@@ -1252,6 +1253,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
+ * ice_get_fwlog_data - copy the FW log data from ARQ event
+ * @pf: PF that the FW log event is associated with
+ * @event: event structure containing FW log data
+ */
+static void
+ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_fwlog_data *fwlog;
+ struct ice_hw *hw = &pf->hw;
+
+ fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
+
+ memset(fwlog->data, 0, PAGE_SIZE);
+ fwlog->data_size = le16_to_cpu(event->desc.datalen);
+
+ memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
+ ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
+
+ if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
+		/* the ring is full so bump the head to create room */
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+}
+
+/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vc_process_vf_msg(pf, &event, &data);
break;
- case ice_aqc_opc_fw_logging:
- ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ case ice_aqc_opc_fw_logs_event:
+ ice_get_fwlog_data(pf, &event);
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -3150,7 +3177,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
+ if (ice_ptp_pf_handles_tx_interrupt(pf))
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
}
@@ -3375,9 +3402,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, v_idx)
+ ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ }
}
/**
@@ -4361,6 +4390,19 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
+ * ice_pf_fwlog_update_module - update the log level for one module
+ * @pf: pointer to the PF struct
+ * @log_level: log level to use for the @module
+ * @module: module to update
+ */
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ hw->fwlog_cfg.module_entries[module].log_level = log_level;
+}
+
+/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4685,6 +4727,8 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_init_lag(pf))
dev_warn(dev, "Failed to init link aggregation support\n");
+
+ ice_hwmon_init(pf);
}
static void ice_deinit_features(struct ice_pf *pf)
@@ -5205,11 +5249,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ ice_debugfs_exit();
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
+ ice_hwmon_exit(pf);
+
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
set_bit(ICE_DOWN, pf->state);
@@ -5674,6 +5722,8 @@ static int __init ice_module_init(void)
goto err_dest_wq;
}
+ ice_debugfs_init();
+
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
@@ -5684,6 +5734,7 @@ static int __init ice_module_init(void)
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
+ ice_debugfs_exit();
err_dest_wq:
destroy_workqueue(ice_wq);
return status;
@@ -7706,6 +7757,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_set_rss_hfunc - Set RSS hash function
+ * @vsi: Pointer to VSI structure
+ * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctx;
+ bool symm;
+ int err;
+
+ if (hfunc == vsi->rss_hfunc)
+ return 0;
+
+ if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
+ hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ return -EOPNOTSUPP;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss;
+ ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ ctx->info.q_opt_rss |=
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
+ vsi->vsi_num, err);
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ vsi->rss_hfunc = hfunc;
+ netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
+ hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
+ "Symmetric " : "");
+ }
+ kfree(ctx);
+ if (err)
+ return err;
+
+ /* Fix the symmetry setting for all existing RSS configurations */
+ symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ return ice_set_rss_cfg_symm(hw, vsi, symm);
+}
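
A caller, for example an ethtool set_rxfh path, would switch a VSI to symmetric Toeplitz with a single call (hedged sketch; only ice_set_rss_hfunc() is from this patch):

/* Illustrative only: request symmetric Toeplitz hashing for this VSI. */
static int example_enable_symmetric_rss(struct ice_vsi *vsi)
{
	return ice_set_rss_hfunc(vsi, ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
}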
+
+/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
* @pid: process ID
@@ -8140,13 +8244,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
enum ice_flow_priority prio;
- u64 prof_id;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ status = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof->prof_id[tun],
prof->vsi_h[0], vsi->idx,
prio, prof->fdir_seg[tun],
&entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 71f405f8a6fe..e9e59f4b5580 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,7 +7,7 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E822 0x100000000ULL
+#define UNKNOWN_INCVAL_E82X 0x100000000ULL
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
@@ -705,7 +705,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
/* Read the Tx ready status first */
err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err || tstamp_ready)
+ if (err)
+ break;
+ else if (tstamp_ready)
return ICE_TX_TSTAMP_WORK_PENDING;
}
@@ -875,7 +877,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
@@ -886,11 +888,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
* registers into chunks based on the port number.
*/
static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->block = port / ICE_PORTS_PER_QUAD;
- tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
- tx->len = INDEX_PER_PORT_E822;
+ tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
+ tx->len = INDEX_PER_PORT_E82X;
tx->verify_cached = 0;
return ice_ptp_alloc_tx_tracker(tx);
@@ -1093,10 +1095,10 @@ static u64 ice_base_incval(struct ice_pf *pf)
if (ice_is_e810(hw))
incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
else
- incval = UNKNOWN_INCVAL_E822;
+ incval = UNKNOWN_INCVAL_E82X;
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1125,10 +1127,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
/* need to read FIFO state */
if (offs == 0 || offs == 1)
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
&val);
else
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
&val);
if (err) {
@@ -1156,7 +1158,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
dev_dbg(ice_pf_to_dev(pf),
"Port %d Tx FIFO still not empty; resetting quad %d\n",
port->port_num, quad);
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
@@ -1201,8 +1203,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work)
tx_err = ice_ptp_check_tx_fifo(port);
if (!tx_err)
- tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
- rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
+ tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
+ rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
if (tx_err || rx_err) {
/* Tx and/or Rx offset not yet configured, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1231,7 +1233,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e822(hw, port, true);
+ err = ice_stop_phy_timer_e82x(hw, port, true);
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1274,7 +1276,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
ptp_port->tx_fifo_busy_cnt = 0;
/* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e822(hw, port);
+ err = ice_start_phy_timer_e82x(hw, port);
if (err)
goto out_unlock;
@@ -1323,7 +1325,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1349,7 +1351,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
&val);
if (err)
break;
@@ -1363,7 +1365,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
}
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
val);
if (err)
break;
@@ -1601,7 +1603,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
else
- start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
+ start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1840,7 +1842,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E822)
+ if (hw->phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2440,6 +2442,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
}
+/**
+ * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
+ * @pf: Board private structure
+ *
+ * The device PHY issues Tx timestamp interrupts to the driver for processing
+ * timestamp data from the PHY. It will not interrupt again until all
+ * current timestamp data is read. In rare circumstances, it is possible that
+ * the driver fails to read all outstanding data.
+ *
+ * To avoid getting permanently stuck, periodically check if the PHY has
+ * outstanding timestamp data. If so, trigger an interrupt from software to
+ * process this data.
+ */
+static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool trigger_oicr = false;
+ unsigned int i;
+
+ if (ice_is_e810(hw))
+ return;
+
+ if (!ice_pf_src_tmr_owned(pf))
+ return;
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (!err && tstamp_ready) {
+ trigger_oicr = true;
+ break;
+ }
+ }
+
+ if (trigger_oicr) {
+ /* Trigger a software interrupt, to ensure this data
+ * gets processed.
+ */
+ dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
+
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+}
+
static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
@@ -2451,6 +2501,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
+ ice_ptp_maybe_trigger_tx_interrupt(pf);
+
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
msecs_to_jiffies(err ? 10 : 500));
@@ -2468,12 +2520,10 @@ void ice_ptp_reset(struct ice_pf *pf)
int err, itr = 1;
u64 time_diff;
- if (test_bit(ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state) ||
+ !ice_pf_src_tmr_owned(pf))
goto pfr;
- if (!ice_pf_src_tmr_owned(pf))
- goto reset_ts;
-
err = ice_ptp_init_phc(hw);
if (err)
goto err;
@@ -2517,10 +2567,6 @@ void ice_ptp_reset(struct ice_pf *pf)
goto err;
}
-reset_ts:
- /* Restart the PHY timestamping block */
- ice_ptp_reset_phy_timestamping(pf);
-
pfr:
/* Init Tx structures */
if (ice_is_e810(&pf->hw)) {
@@ -2528,7 +2574,7 @@ pfr:
} else {
kthread_init_delayed_work(&ptp->port.ov_work,
ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
ptp->port.port_num);
}
if (err)
@@ -2536,6 +2582,11 @@ pfr:
set_bit(ICE_FLAG_PTP, pf->flags);
+ /* Restart the PHY timestamping block */
+ if (!test_bit(ICE_PFR_REQ, pf->state) &&
+ ice_pf_src_tmr_owned(pf))
+ ice_ptp_restart_all_phy(pf);
+
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2896,11 +2947,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
return -ENODEV;
@@ -2987,7 +3038,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.phy_model) {
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 06a330867fc9..d79281061409 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -147,7 +147,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
-#define INDEX_PER_PORT_E822 16
+#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 4109aa3b2fcd..2c4dab0c48ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,17 +9,17 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
-/* struct ice_time_ref_info_e822
+/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
* This lookup table defines several constants that depend on the current time
- * reference. See the struct ice_time_ref_info_e822 for information about the
+ * reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
@@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
-const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* refclk_pre_div */
@@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
-/* struct ice_vernier_info_e822
+/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
* actual packet transmission or reception during the initialization of the
@@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
* used by this link speed, and that the register should be cleared by writing
* 0. Other values specify the clock frequency in Hz.
*/
-const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* ICE_PTP_LNK_SPD_1G */
{
/* tx_par_clk */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index a00b55e14aac..187ce9b54e1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
- * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
static void
-ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E822;
- phy = port / ICE_PORTS_PER_PHY_E822;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
+ phy_port = port % ICE_PORTS_PER_PHY_E82X;
+ phy = port / ICE_PORTS_PER_PHY_E82X;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
}
/**
- * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
*
@@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
* represented as two 32bit registers. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_PAR_PCS_TX_OFFSET_L:
@@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 40bit value
*
@@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* upper 32 bits in the high register. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_TIMETUS_L:
@@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_read_phy_reg_e822 - Read a PHY register
+ * ice_read_phy_reg_e82x - Read a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @offset: PHY register offset to read
@@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* Read a PHY register for the given port over the device sideband queue.
*/
static int
-ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
}
/**
- * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
* known to be two parts of a 64bit value.
*/
static int
-ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
{
u32 low, high;
u16 high_addr;
@@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
}
- err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
high_addr, err);
@@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
}
/**
- * ice_write_phy_reg_e822 - Write a PHY register
+ * ice_write_phy_reg_e82x - Write a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to write to
* @offset: PHY register offset to write
@@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
* Write a PHY register for the given port over the device sideband queue.
*/
static int
-ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
}
/**
- * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
* @hw: pointer to the HW struct
* @port: port to write to
* @low_addr: offset of the low register
@@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
* it up into two chunks, the lower 8 bits and the upper 32 bits.
*/
static int
-ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into a lower 8 bit
* register and an upper 32 bit register.
*/
- if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = (u32)(val & P_REG_40B_LOW_M);
high = (u32)(val >> P_REG_40B_HIGH_S);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* a 64bit value.
*/
static int
-ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = lower_32_bits(val);
high = upper_32_bits(val);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * ice_fill_quad_msg_e82x - Fill message data for quad register access
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
@@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* multiple PHYs.
*/
static int
-ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
@@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
}
/**
- * ice_read_quad_reg_e822 - Read a PHY quad register
+ * ice_read_quad_reg_e82x - Read a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to read from
* @offset: quad register offset to read
@@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
* shared between multiple PHYs.
*/
int
-ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
}
/**
- * ice_write_quad_reg_e822 - Write a PHY quad register
+ * ice_write_quad_reg_e82x - Write a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to write to
* @offset: quad register offset to write
@@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
* shared between multiple PHYs.
*/
int
-ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
}
/**
- * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to read
@@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
* family of devices.
*/
static int
-ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
{
u16 lo_addr, hi_addr;
u32 lo, hi;
@@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
@@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
}
/**
- * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to reset
@@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
*
* To directly clear the contents of the timestamp block entirely, discarding
* all timestamp data at once, software should instead use
- * ice_ptp_reset_ts_memory_quad_e822().
+ * ice_ptp_reset_ts_memory_quad_e82x().
*
* This function should only be called on an idx whose bit is set according to
* ice_get_phy_tx_tstamp_ready().
*/
static int
-ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
{
u64 unused_tstamp;
int err;
- err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
+ err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
quad, idx, err);
@@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
}
/**
- * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
+ * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
*
* Clear all timestamps from the PHY quad block that is shared between the
* internal PHYs on the E822 devices.
*/
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
{
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
/**
- * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
+ * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
* @hw: pointer to the HW struct
*/
-static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
+static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
for (quad = 0; quad < ICE_MAX_QUAD; quad++)
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e822 - Read a CGU register
+ * ice_read_cgu_reg_e82x - Read a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to read
* @val: storage for register value read
@@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
* applicable to E822 devices.
*/
static int
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
}
/**
- * ice_write_cgu_reg_e822 - Write a CGU register
+ * ice_write_cgu_reg_e82x - Write a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to write
* @val: value to write into the register
@@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
* applicable to E822 devices.
*/
static int
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src)
}
/**
- * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
* @hw: pointer to the HW struct
* @clk_freq: Clock frequency to program
* @clk_src: Clock source to select (TIME_REF, or TCX0)
@@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src)
* time reference, enabling the PLL which drives the PTP hardware clock.
*/
static int
-ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
enum ice_clk_src clk_src)
{
union tspll_ro_bwm_lf bwm_lf;
@@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
return -EINVAL;
}
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
if (dw24.field.ts_pll_enable) {
dw24.field.ts_pll_enable = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
}
/* Set the frequency */
dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
if (err)
return err;
/* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
if (err)
return err;
dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
dw19.field.tspll_ndivratio = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
if (err)
return err;
/* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
if (err)
return err;
dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
dw22.field.time1588clk_sel_div2 = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
if (err)
return err;
/* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
@@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
dw24.field.time_ref_sel = clk_src;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Finally, enable the PLL */
dw24.field.ts_pll_enable = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Wait to verify if the PLL locks */
usleep_range(1000, 5000);
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
}
/**
- * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
* @hw: pointer to the HW structure
*
* Initialize the Clock Generation Unit of the E822 device.
*/
-static int ice_init_cgu_e822(struct ice_hw *hw)
+static int ice_init_cgu_e82x(struct ice_hw *hw)
{
struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
union tspll_cntr_bist_settings cntr_bist;
int err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
&cntr_bist.val);
if (err)
return err;
@@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
cntr_bist.field.i_plllock_sel_0 = 0;
cntr_bist.field.i_plllock_sel_1 = 0;
- err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
cntr_bist.val);
if (err)
return err;
@@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
if (err)
return err;
@@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
PTP_VERNIER_WL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
@@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
* @hw: pointer to HW struct
*
* Perform PHC initialization steps specific to E822 devices.
*/
-static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
u32 regval;
@@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
wr32(hw, PF_SB_REM_DEV_CTL, regval);
/* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e822(hw);
+ err = ice_init_cgu_e82x(hw);
if (err)
return err;
@@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
}
/**
- * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
* @hw: pointer to the HW struct
* @time: Time to initialize the PHY port clocks to
*
@@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* units of nominal nanoseconds.
*/
static int
-ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
{
u64 phy_time;
u8 port;
@@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
/* Tx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
phy_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_RX_TIMER_INC_PRE_L,
phy_time);
if (err)
@@ -1201,7 +1201,7 @@ exit_err:
}
/**
- * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
* @hw: pointer to HW struct
* @port: Port number to be programmed
* @time: time in cycles to adjust the port Tx and Rx clocks
@@ -1216,7 +1216,7 @@ exit_err:
* Negative adjustments are supported using 2s complement arithmetic.
*/
static int
-ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
int err;
@@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
u_time = upper_32_bits(time);
/* Tx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
@@ -1255,7 +1255,7 @@ exit_err:
}
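
[Editor's note, not part of the patch] As the kernel-doc says, the port adjustment is a signed 64-bit cycle count split into lower and upper 32-bit register writes, and negative values just work because the registers take the two's complement representation. A standalone sketch of that split, with an illustrative value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t adj = -250;	/* e.g. pull the port timer back by 250 cycles */
	uint32_t lo = (uint32_t)((uint64_t)adj & 0xffffffffu);
	uint32_t hi = (uint32_t)((uint64_t)adj >> 32);

	/* Reassembling the two halves recovers the signed value unchanged */
	int64_t back = (int64_t)(((uint64_t)hi << 32) | lo);

	printf("lo=%#llx hi=%#llx back=%lld\n",
	       (unsigned long long)lo, (unsigned long long)hi, (long long)back);
	return 0;
}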
/**
- * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment in nanoseconds
*
@@ -1264,7 +1264,7 @@ exit_err:
* ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
-ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
{
s64 cycles;
u8 port;
@@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
if (err)
return err;
}
@@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
}
/**
- * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
* @hw: pointer to HW struct
* @incval: new increment value to prepare
*
@@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
* issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
-ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
{
int err;
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
goto exit_err;
@@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
int err;
/* Tx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
err);
@@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
(unsigned long long)*tx_ts);
/* Rx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
err);
@@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
@@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
* Do not use this function directly. If you want to configure exactly one
* port, use ice_ptp_one_port_cmd() instead.
*/
-static int
-ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Tx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
err);
@@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
err);
@@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Rx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
err);
@@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
else
cmd = ICE_PTP_NOP;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
}
/**
- * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
*
@@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
* command.
*/
static int
-ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
/**
- * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
* @hw: pointer to HW struct
* @port: the port to read from
* @link_out: if non-NULL, holds link speed on success
@@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
* algorithm.
*/
static int
-ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd *link_out,
enum ice_ptp_fec_mode *fec_out)
{
@@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
u32 serdes;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
return err;
@@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
* @hw: pointer to HW struct
* @port: to configure the quad for
*/
-static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
int err;
u32 val;
u8 quad;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
err);
@@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
quad = port / ICE_PORTS_PER_QUAD;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
err);
@@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
else
val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
err);
@@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
* @hw: pointer to the HW structure
* @port: the port to configure
*
@@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
* a divide by 390,625,000. This does lose some precision, but avoids
* miscalculation due to arithmetic overflow.
*/
-static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, uix;
int err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second divided by 256 */
@@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 10Gb/40Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
@@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 25Gb/100Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
@@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
}
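
[Editor's note, not part of the patch] The UI-to-TU conversion above folds its scaling into two steps: a right shift by 8 ("TUs per second divided by 256") followed by a divide by 390,625,000, and 256 * 390,625,000 = 10^11, so the pair is one divide by 10^11 done in a way that keeps the intermediate product inside 64 bits. A standalone sketch with stand-in numbers (cur_freq, clk_incval and the LINE_UI constant here are illustrative, not the hardware table values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cur_freq   = 823437500ull;	/* assumed PLL frequency, Hz */
	uint64_t clk_incval = 0x138000000ull;	/* assumed increment value */
	uint64_t line_ui    = 640ull;		/* stand-in for LINE_UI_10G_40G */

	/* Two-step divide: >>8 first, then /390,625,000 (overall /1e11).
	 * Multiplying freq * incval * line_ui in one go would overflow u64. */
	uint64_t tu_per_sec = (cur_freq * clk_incval) >> 8;
	uint64_t uix = tu_per_sec * line_ui / 390625000ull;

	printf("tu_per_sec=%llu uix=%llu\n",
	       (unsigned long long)tu_per_sec, (unsigned long long)uix);
	return 0;
}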
/**
- * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
* @hw: pointer to the HW struct
* @port: port to configure
*
@@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
* frequency is ~29 bits, so multiplying them together should fit within the
* 64 bit arithmetic.
*/
-static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
int err;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per cycle of the PHC clock */
@@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
phy_tus);
}
/**
- * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port
+ * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port
* @hw: pointer to the HW struct
* @link_spd: the Link speed to calculate for
*
* Calculate the fixed offset due to known static latency data.
*/
static u64
-ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
port, err);
@@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_TX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
@@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_25G_RS ||
link_spd == ICE_PTP_LNK_SPD_40G ||
link_spd == ICE_PTP_LNK_SPD_50G) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_TX_OFFSET_L,
&val);
if (err)
@@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
*/
if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_TX_TIME_L,
&val);
if (err)
@@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Tx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
if (err)
return err;
@@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
}
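
[Editor's note, not part of the patch] The Tx offset programmed above is built up as a sum: the fixed latency for the link speed, plus the first vernier measurement from P_REG_PAR_PCS_TX_OFFSET, plus P_REG_PAR_TX_TIME only for the RS-FEC speeds, and the result is written to P_REG_TOTAL_TX_OFFSET before P_REG_TX_OR is set. A compressed sketch of that composition with made-up TU values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative TU values only; the driver reads these from the PHY
	 * after the vernier calibration (TX_OV_STATUS) has completed. */
	uint64_t fixed_offset   = 120000;	/* static latency for the link speed */
	uint64_t par_pcs_offset = 3500;		/* first vernier measurement */
	uint64_t par_tx_time    = 0;		/* added only for 50G-RS / 100G-RS */

	uint64_t total_offset = fixed_offset + par_pcs_offset + par_tx_time;

	/* The driver then writes total_offset and sets the offset-ready bit,
	 * after which Tx timestamps are reported. */
	printf("total Tx offset = %llu TUs\n", (unsigned long long)total_offset);
	return 0;
}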
/**
- * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
* @hw: pointer to the HW struct
* @port: the PHY port to adjust for
* @link_spd: the current link speed of the PHY
@@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* various delays caused when receiving a packet.
*/
static int
-ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd link_spd,
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
@@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u32 val;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
err);
@@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
pmd_align = (u8)val;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
@@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
@@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port
+ * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port
* @hw: pointer to HW struct
* @link_spd: The Link speed to calculate for
*
* Determine the fixed Rx latency for a given link speed.
*/
static u64
-ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
port, err);
@@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_RX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
*/
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_RX_OFFSET_L,
&val);
if (err)
@@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_50G ||
link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_RX_TIME_L,
&val);
if (err)
@@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/* In addition, Rx must account for the PMD alignment */
- err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
if (err)
return err;
@@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Rx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
if (err)
return err;
@@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
* @phy_time: on return, the 64bit PHY timer value
@@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* and PHC timer values.
*/
static int
-ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
u64 *phc_time)
{
u64 tx_time, rx_time;
@@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
}
/**
- * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
* @hw: pointer to the HW struct
* @port: the PHY port to synchronize
*
@@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* to the PHY timer in order to ensure it reads the same value as the
* primary PHC timer.
*/
-static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u64 phc_time, phy_time, difference;
int err;
@@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
return -EBUSY;
}
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
*/
difference = phc_time - phy_time;
- err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
if (err)
goto err_unlock;
@@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
/* Re-capture the timer values to flush the command registers and
* verify that the time was properly adjusted.
*/
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2452,7 +2452,7 @@ err_unlock:
}
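
[Editor's note, not part of the patch] The synchronization above captures the PHY and PHC timers in one shot, programs the PHY with the difference, then re-captures to verify. The core arithmetic is the unsigned subtraction followed by a signed interpretation, which stays correct even when the PHY timer is ahead of the PHC. A standalone sketch with illustrative capture values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* In the driver both values come from a single ICE_PTP_READ_TIME capture */
	uint64_t phc_time = 1000000;
	uint64_t phy_time = 1000250;	/* PHY is 250 TUs ahead */

	int64_t adj = (int64_t)(phc_time - phy_time);

	/* Unsigned wraparound plus the signed cast yields -250 here on the
	 * usual two's complement targets, which the port adjustment
	 * registers accept directly. */
	printf("adjustment = %lld TUs\n", (long long)adj);
	return 0;
}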
/**
- * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * ice_stop_phy_timer_e82x - Stop the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to stop
* @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
@@ -2462,36 +2462,36 @@ err_unlock:
* initialized or when link speed changes.
*/
int
-ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
{
int err;
u32 val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
if (err)
return err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val &= ~P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
if (soft_reset) {
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
}
@@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
}
/**
- * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * ice_start_phy_timer_e82x - Start the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to start
*
@@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
*
* Hardware will take Vernier measurements on Tx or Rx of packets.
*/
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u32 lo, hi, val;
u64 incval;
@@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
tmr_idx = ice_get_ptp_src_clock_index(hw);
- err = ice_stop_phy_timer_e822(hw, port, false);
+ err = ice_stop_phy_timer_e82x(hw, port, false);
if (err)
return err;
- ice_phy_cfg_lane_e822(hw, port);
+ ice_phy_cfg_lane_e82x(hw, port);
- err = ice_phy_cfg_uix_e822(hw, port);
+ err = ice_phy_cfg_uix_e82x(hw, port);
if (err)
return err;
- err = ice_phy_cfg_parpcs_e822(hw, port);
+ err = ice_phy_cfg_parpcs_e82x(hw, port);
if (err)
return err;
@@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
incval = (u64)hi << 32 | lo;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
if (err)
return err;
@@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
@@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
val |= P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_LOAD_OFFSET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
ice_ptp_exec_tmr_cmd(hw);
- err = ice_sync_phy_timer_e822(hw, port);
+ err = ice_sync_phy_timer_e82x(hw, port);
if (err)
return err;
@@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
+ * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
* @hw: pointer to the HW struct
* @quad: the timestamp quad to read from
* @tstamp_ready: contents of the Tx memory status register
@@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
* ready to be captured from the PHY timestamp block.
*/
static int
-ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
+ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
{
u32 hi, lo;
int err;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
quad, err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
quad, err);
@@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw)
if (ice_is_e810(hw))
hw->phy_model = ICE_PHY_E810;
else
- hw->phy_model = ICE_PHY_E822;
+ hw->phy_model = ICE_PHY_E82X;
}
/**
@@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
break;
- case ICE_PHY_E822:
- err = ice_ptp_port_cmd_e822(hw, cmd);
+ case ICE_PHY_E82X:
+ err = ice_ptp_port_cmd_e82x(hw, cmd);
break;
default:
err = -EOPNOTSUPP;
@@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
default:
err = -EOPNOTSUPP;
@@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
default:
err = -EOPNOTSUPP;
@@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
default:
err = -EOPNOTSUPP;
@@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E822:
- return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ case ICE_PHY_E82X:
+ return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E822:
- return ice_clear_phy_tstamp_e822(hw, block, idx);
+ case ICE_PHY_E82X:
+ return ice_clear_phy_tstamp_e82x(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
switch (hw->phy_model) {
- case ICE_PHY_E822:
- ice_ptp_reset_ts_memory_e822(hw);
+ case ICE_PHY_E82X:
+ ice_ptp_reset_ts_memory_e82x(hw);
break;
case ICE_PHY_E810:
default:
@@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E822:
- return ice_ptp_init_phc_e822(hw);
+ case ICE_PHY_E82X:
+ return ice_ptp_init_phc_e82x(hw);
default:
return -EOPNOTSUPP;
}
@@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E822:
- return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ case ICE_PHY_E82X:
+ return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
break;
default:
@@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
- *pin_num = ICE_E822_RCLK_PINS_NUM;
+ *pin_num = ICE_E82X_RCLK_PINS_NUM;
ret = 0;
if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index cf76701566c7..e976138e934a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -42,7 +42,7 @@ enum ice_ptp_fec_mode {
};
/**
- * struct ice_time_ref_info_e822
+ * struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
* @pps_delay: propagation delay of the PPS output signal
@@ -50,14 +50,14 @@ enum ice_ptp_fec_mode {
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
*/
-struct ice_time_ref_info_e822 {
+struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
u8 pps_delay;
};
/**
- * struct ice_vernier_info_e822
+ * struct ice_vernier_info_e82x
* @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
* @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
* @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
@@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 {
* different link speeds, either the deskew marker for multi-lane link speeds
* or the Reed Solomon gearbox marker for RS-FEC.
*/
-struct ice_vernier_info_e822 {
+struct ice_vernier_info_e82x {
u32 tx_par_clk;
u32 rx_par_clk;
u32 tx_pcs_clk;
@@ -95,7 +95,7 @@ struct ice_vernier_info_e822 {
};
/**
- * struct ice_cgu_pll_params_e822
+ * struct ice_cgu_pll_params_e82x
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -104,7 +104,7 @@ struct ice_vernier_info_e822 {
* Clock Generation Unit parameters used to program the PLL based on the
* selected TIME_REF frequency.
*/
-struct ice_cgu_pll_params_e822 {
+struct ice_cgu_pll_params_e82x {
u32 refclk_pre_div;
u32 feedback_div;
u32 frac_n_div;
@@ -124,7 +124,7 @@ enum ice_phy_rclk_pins {
};
#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
-#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
(_pin) + ZL_REF1P)
@@ -183,16 +183,16 @@ struct ice_cgu_pin_desc {
};
extern const struct
-ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
/* Table of constants for Vernier calibration on E822 */
-extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
@@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
-int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
+int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
/**
- * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * ice_e82x_time_ref - Get the current TIME_REF from capabilities
* @hw: pointer to the HW structure
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
/**
- * ice_set_e822_time_ref - Set new TIME_REF
+ * ice_set_e82x_time_ref - Set new TIME_REF
* @hw: pointer to the HW structure
* @time_ref: new TIME_REF to set
*
@@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pps_delay;
}
/* E822 Vernier calibration functions */
-int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
+int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 5a45bd5ce6ad..6d33dd647c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -375,16 +375,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
- struct ice_pf *pf;
-
if (!vf || !q_vector)
return -EINVAL;
- pf = vf->pf;
-
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
- q_vector->v_idx + 1;
+ return vf->first_vector_idx + q_vector->v_idx + 1;
}
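
[Editor's note, not part of the patch] With this change the register index comes purely from the VF's own first vector: slot 0 at first_vector_idx is the OICR, so queue vector v_idx lands at first_vector_idx + v_idx + 1. A tiny worked example (numbers are illustrative):

#include <stdio.h>

static int calc_vf_reg_idx(int first_vector_idx, int v_idx)
{
	/* +1 skips the OICR, which occupies the VF's first MSI-X slot */
	return first_vector_idx + v_idx + 1;
}

int main(void)
{
	/* e.g. a VF whose vectors start at absolute index 40 */
	printf("q_vector 0 -> %d\n", calc_vf_reg_idx(40, 0));	/* 41 */
	printf("q_vector 2 -> %d\n", calc_vf_reg_idx(40, 2));	/* 43 */
	return 0;
}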
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a18ca0ff879f..6df7c4487ad0 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,6 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
+#include "ice_fwlog.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -246,6 +247,7 @@ struct ice_fd_hw_prof {
int cnt;
u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+ u64 prof_id[ICE_FD_HW_SEG_MAX];
};
/* Common HW capabilities for SW use */
@@ -377,6 +379,8 @@ struct ice_hw_func_caps {
struct ice_ts_func_info ts_func_info;
};
+#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0
+
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
@@ -385,6 +389,11 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ /* bitmap of supported sensors
+ * bit 0 - internal temperature sensor
+ * bit 31:1 - Reserved
+ */
+ u32 supported_sensors;
};
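
[Editor's note, not part of the patch] The new supported_sensors word is a plain bitmap, with bit 0 (ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT) indicating an internal temperature sensor. A minimal sketch of the check a consumer would make; the define below mirrors the one added above and the value is illustrative:

#include <stdint.h>
#include <stdio.h>

#define SENSOR_INT_TEMP_BIT 0	/* mirrors ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT */

int main(void)
{
	uint32_t supported_sensors = 0x1;	/* as reported by device capabilities */

	if (supported_sensors & (1u << SENSOR_INT_TEMP_BIT))
		printf("internal temperature sensor available\n");
	else
		printf("no internal temperature sensor\n");
	return 0;
}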
/* MAC info */
@@ -730,24 +739,6 @@ struct ice_switch_info {
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
-/* FW logging configuration */
-struct ice_fw_log_evnt {
- u8 cfg : 4; /* New event enables to configure */
- u8 cur : 4; /* Current/active event enables */
-};
-
-struct ice_fw_log_cfg {
- u8 cq_en : 1; /* FW logging is enabled via the control queue */
- u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
- u8 actv_evnts; /* Cumulation of currently enabled log events */
-
-#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
- struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
-};
-
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@@ -827,7 +818,7 @@ struct ice_mbx_data {
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
- ICE_PHY_E822,
+ ICE_PHY_E82X,
};
/* Port hardware description */
@@ -889,7 +880,9 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fw_log_cfg fw_log;
+ struct ice_fwlog_cfg fwlog_cfg;
+ bool fwlog_supported; /* does hardware support FW logging? */
+ struct ice_fwlog_ring fwlog_ring;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -910,10 +903,9 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E822 2
-#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_QUADS_PER_PHY_E82X 2
+#define ICE_PORTS_PER_PHY_E82X 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
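
[Editor's note, not part of the patch] These defines encode the E82x port topology: two quads of four ports gives the eight external ports, and earlier in the patch the owning quad is derived as port / ICE_PORTS_PER_QUAD. A quick arithmetic sketch of that mapping (local names, not the driver macros):

#include <stdio.h>

#define MAX_QUAD	2
#define PORTS_PER_QUAD	4
#define NUM_EXTERNAL_PORTS (MAX_QUAD * PORTS_PER_QUAD)	/* 8 */

int main(void)
{
	for (int port = 0; port < NUM_EXTERNAL_PORTS; port++)
		printf("port %d -> quad %d\n", port, port / PORTS_PER_QUAD);
	return 0;
}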
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index d2a99a20c4ad..d6f74513b495 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -828,12 +828,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -844,6 +848,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@@ -936,6 +951,11 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index d7b10dc67f03..80dc4bcdd3a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -32,7 +32,6 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
/* setup inner VLAN ops */
vlan_ops = &vsi->inner_vlan_ops;
@@ -47,8 +46,13 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
+
+ /* all Rx traffic should be in the domain of the assigned port VLAN,
+ * so prevent disabling Rx VLAN filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+
vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
}
@@ -77,6 +81,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
vlan_ops->del_vlan = ice_vsi_del_vlan;
}
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -141,7 +147,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
&vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cdf17b1e2f25..d4ad0739b57b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -689,9 +689,7 @@ out:
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
* @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @hash_cfg: pointer to the HW hash configuration
*
* Return true if all the protocol header and hash fields in the RSS cfg could
* be parsed, else return false
@@ -699,13 +697,23 @@ out:
* This function parses the virtchnl RSS cfg into the intended
* hash fields and headers for the RSS configuration
*/
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
{
const struct ice_vc_hash_field_match_type *hf_list;
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
hf_list = ice_vc_hash_field_list;
hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
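
After this hunk, ice_vc_parse_rss_cfg() fills a single struct ice_rss_hash_cfg instead of separate header/field outputs, defaulting to outer-header RSS and recording whether the VF asked for symmetric Toeplitz. The sketch below mirrors that mapping with simplified stand-in types; the enum values and struct layout are illustrative, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the virtchnl algorithm and the HW hash cfg. */
enum rss_alg { RSS_ALG_TOEPLITZ_ASYMMETRIC, RSS_ALG_TOEPLITZ_SYMMETRIC };
enum hdr_type { RSS_OUTER_HEADERS, RSS_INNER_HEADERS };

struct hash_cfg {
	uint32_t addl_hdrs;      /* protocol headers to hash on */
	uint64_t hash_flds;      /* hash field bitmap */
	enum hdr_type hdr_type;  /* which header layer to hash */
	bool symm;               /* symmetric Toeplitz requested? */
};

static void parse_rss_cfg(enum rss_alg alg, struct hash_cfg *cfg)
{
	/* Outer-layer RSS is the default; symmetry comes straight from
	 * the requested algorithm. Header/field parsing of the
	 * proto_hdrs list would follow here.
	 */
	cfg->hdr_type = RSS_OUTER_HEADERS;
	cfg->symm = (alg == RSS_ALG_TOEPLITZ_SYMMETRIC);
}

int main(void)
{
	struct hash_cfg cfg = { 0 };

	parse_rss_cfg(RSS_ALG_TOEPLITZ_SYMMETRIC, &cfg);
	printf("symmetric hashing: %s\n", cfg.symm ? "yes" : "no");
	return 0;
}
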
@@ -823,8 +831,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -832,11 +840,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
- ctx->info.q_opt_rss = ((lut_type <<
- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ ctx->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
/* Preserve existing queueing option setting */
ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
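
The q_opt_rss hunk above replaces the open-coded shift-and-mask with FIELD_PREP() from <linux/bitfield.h>, which shifts a value into the position given by a mask's lowest set bit and clamps it to the mask. A user-space approximation of that behaviour is sketched below; the masks are examples, not the ICE_AQ_VSI_Q_OPT_RSS_* values, and the real kernel macro additionally does compile-time type and range checks.

#include <stdint.h>
#include <stdio.h>

/* Rough user-space analogue of FIELD_PREP(): shift the value up to the
 * mask's lowest set bit, then mask the result.
 */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	const uint32_t lut_mask  = 0x0C; /* example 2-bit LUT-type field */
	const uint32_t hash_mask = 0xC0; /* example 2-bit hash-type field */

	uint32_t q_opt_rss = field_prep(lut_mask, 0x1) |
			     field_prep(hash_mask, 0x2);

	printf("q_opt_rss = 0x%02x\n", q_opt_rss);  /* prints 0x84 */
	return 0;
}
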
@@ -858,18 +864,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
kfree(ctx);
} else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
+ struct ice_rss_hash_cfg cfg;
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
+ /* Only check the non-raw pattern case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
@@ -877,8 +889,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
} else {
int status;
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
/* We just ignore -ENOENT, because if two configurations
* share the same profile, removing one of them actually
* removes both, since the profile is deleted.
@@ -989,6 +1000,51 @@ error_param:
}
/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1523,7 +1579,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -1535,7 +1590,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ vf->num_msix < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -1557,7 +1612,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->vfs.num_msix_per) ||
+ if (!(vector_id < vf->num_msix) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1603,9 +1658,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1729,6 +1799,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1818,11 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */
@@ -2610,7 +2690,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
}
if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
v_ret = ice_err_to_virt_err(status);
}
@@ -3731,6 +3811,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -3860,6 +3941,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -4042,6 +4124,9 @@ error_handler:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
err = ops->config_rss_lut(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ err = ops->config_rss_hfunc(vf, msg);
+ break;
case VIRTCHNL_OP_GET_STATS:
err = ops->get_stats_msg(vf, msg);
break;
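
The new VIRTCHNL_OP_CONFIG_RSS_HFUNC opcode is wired up the same way as the other RSS messages: a handler added to both ops tables and a new case in the opcode switch. The fragment below sketches that table-plus-switch dispatch style in plain C with made-up opcode numbers and handler names.

#include <stdio.h>

/* Made-up opcode values, for illustration only. */
enum vc_op { OP_CONFIG_RSS_KEY = 1, OP_CONFIG_RSS_LUT = 2, OP_CONFIG_RSS_HFUNC = 3 };

struct vc_ops {
	int (*config_rss_key)(const char *msg);
	int (*config_rss_lut)(const char *msg);
	int (*config_rss_hfunc)(const char *msg);   /* newly added hook */
};

static int rss_key(const char *m)   { printf("key: %s\n", m);   return 0; }
static int rss_lut(const char *m)   { printf("lut: %s\n", m);   return 0; }
static int rss_hfunc(const char *m) { printf("hfunc: %s\n", m); return 0; }

static const struct vc_ops dflt_ops = {
	.config_rss_key   = rss_key,
	.config_rss_lut   = rss_lut,
	.config_rss_hfunc = rss_hfunc,
};

static int dispatch(const struct vc_ops *ops, enum vc_op op, const char *msg)
{
	switch (op) {
	case OP_CONFIG_RSS_KEY:   return ops->config_rss_key(msg);
	case OP_CONFIG_RSS_LUT:   return ops->config_rss_lut(msg);
	case OP_CONFIG_RSS_HFUNC: return ops->config_rss_hfunc(msg);
	}
	return -1;
}

int main(void)
{
	return dispatch(&dflt_ops, OP_CONFIG_RSS_HFUNC, "symmetric toeplitz");
}
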
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index cd747718de73..60dfbe05980a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -32,6 +32,7 @@ struct ice_virtchnl_ops {
int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg);
int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 7d547fa616fa..5e19d48a05b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -68,6 +68,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
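
Adding VIRTCHNL_OP_CONFIG_RSS_HFUNC to rss_pf_allowlist_opcodes means the message is only accepted from VFs that negotiated the PF-driven RSS capability; the allowlist is simply a per-capability array of permitted opcodes. A trivial sketch of such a lookup is shown here with generic names and made-up values, not the driver's allowlist code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Example opcode list for one negotiated capability; values are made up. */
static const uint32_t rss_pf_allowlist[] = { 23, 24, 25, 26, 81 };

static bool opcode_allowed(const uint32_t *list, size_t len, uint32_t opcode)
{
	for (size_t i = 0; i < len; i++)
		if (list[i] == opcode)
			return true;
	return false;
}

int main(void)
{
	uint32_t op = 81;   /* stand-in for the newly allowed opcode */

	printf("opcode %u allowed: %s\n", op,
	       opcode_allowed(rss_pf_allowlist,
			      sizeof(rss_pf_allowlist) / sizeof(rss_pf_allowlist[0]),
			      op) ? "yes" : "no");
	return 0;
}
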
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 24b23b7ef04a..9ee7ab207b37 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -10,19 +10,6 @@
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
-#define ICE_FLOW_PROF_TYPE_S 0
-#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
-#define ICE_FLOW_PROF_VSI_S 32
-#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
-
-/* Flow profile ID format:
- * [0:31] - flow type, flow + tun_offs
- * [32:63] - VSI index
- */
-#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
- ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
- (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
-
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
@@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
return;
vf_prof = fdir->fdir_prof[flow];
+ prof_id = vf_prof->prof_id[tun];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
@@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
if (!fdir->prof_entry_cnt[flow][tun])
return;
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
- flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
-
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
@@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
- u64 prof_id;
int ret;
pf = vf->pf;
@@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
ice_vc_fdir_rem_prof(vf, flow, tun);
}
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
- tun ? ICE_FLTR_PTYPE_MAX : 0);
-
- ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ tun + 1, false, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (ret) {
@@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
goto err_prof;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (ret) {
@@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
+ vf_prof->prof_id[tun] = prof->id;
+
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
return ret;
}
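
The ice_virtchnl_fdir.c change drops the synthesized 64-bit profile ID (flow type in the low word, VSI index in the high word) and instead uses the ID that ice_flow_add_prof() now allocates, caching it in vf_prof->prof_id[tun] so the remove path can find it later. The sketch below shows that allocate-then-cache pattern with invented helper names; it is not the driver's flow API.

#include <stdint.h>
#include <stdio.h>

#define MAX_TUN 2   /* non-tunnelled and tunnelled variants, as in the driver */

/* Hypothetical profile bookkeeping kept per flow type. */
struct fdir_prof {
	uint64_t prof_id[MAX_TUN];   /* IDs handed back by the "HW" layer */
};

static uint64_t next_id = 1;

/* Stand-in for ice_flow_add_prof(): the ID is allocated here, rather than
 * packed from (VSI, flow type) by the caller as the removed macro did.
 */
static uint64_t flow_add_prof(void)
{
	return next_id++;
}

static void flow_rem_prof(uint64_t id)
{
	printf("removing flow profile %llu\n", (unsigned long long)id);
}

int main(void)
{
	struct fdir_prof prof = { 0 };
	int tun = 0;

	/* Add: remember the allocated ID for this tunnel variant. */
	prof.prof_id[tun] = flow_add_prof();

	/* Remove: look the ID up instead of recomputing it. */
	flow_rem_prof(prof.prof_id[tun]);
	return 0;
}
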