-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h           |   7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c    |  41
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h    |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c   |   3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c   |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c       | 272
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c      |  96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c    |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c      |  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c      |  40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h      |  18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c  |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c    |  92
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c  |  37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c       |  25
17 files changed, 482 insertions(+), 200 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index f88ee051e71c..2f0b604abc5e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -320,6 +320,11 @@ enum ice_vsi_state {
ICE_VSI_STATE_NBITS /* must be last */
};
+struct ice_vsi_stats {
+ struct ice_ring_stats **tx_ring_stats; /* Tx ring stats array */
+ struct ice_ring_stats **rx_ring_stats; /* Rx ring stats array */
+};
+
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
struct net_device *netdev;
@@ -373,6 +378,7 @@ struct ice_vsi {
/* VSI stats */
struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_prev;
struct ice_eth_stats eth_stats;
struct ice_eth_stats eth_stats_prev;
@@ -540,6 +546,7 @@ struct ice_pf {
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
struct ice_vsi **vsi; /* VSIs created by the driver */
+ struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct ice_vfs vfs;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index e864634d66bc..554095b25f44 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -389,7 +389,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
*/
- rlan_ctx.base = ring->dma >> 7;
+ rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 216370ec60d4..d02b55b6aa9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2948,8 +2948,8 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
+ * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
@@ -5515,3 +5515,40 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
ICE_FW_API_REPORT_DFLT_CFG_MIN,
ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
+
+/* Each index into the following array matches the speed of a return
+ * value from the list of AQ returned speeds in the range
+ * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
+ * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
+ * array. The array is defined as 15 elements long because the link_speed
+ * returned by the firmware is a 16 bit value, but is indexed
+ * by [fls(speed) - 1]
+ */
+static const u32 ice_aq_to_link_speed[15] = {
+ SPEED_10, /* BIT(0) */
+ SPEED_100,
+ SPEED_1000,
+ SPEED_2500,
+ SPEED_5000,
+ SPEED_10000,
+ SPEED_20000,
+ SPEED_25000,
+ SPEED_40000,
+ SPEED_50000,
+ SPEED_100000, /* BIT(10) */
+ 0,
+ 0,
+ 0,
+ 0 /* BIT(14) */
+};
+
+/**
+ * ice_get_link_speed - get integer speed from table
+ * @index: array index from fls(aq speed) - 1
+ *
+ * Returns: u32 value containing integer speed
+ */
+u32 ice_get_link_speed(u16 index)
+{
+ return ice_aq_to_link_speed[index];
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8b6712b92e84..4c6a0b5c9304 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -163,6 +163,7 @@ int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
+u32 ice_get_link_speed(u16 index);
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 9defb9d0fe88..4f24d441c35e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -881,6 +881,9 @@ void ice_update_dcb_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
for (i = 0; i < 8; i++) {
ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f71a7521c7bd..4191994d8f3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1544,9 +1544,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_txq(vsi, j) {
tx_ring = READ_ONCE(vsi->tx_rings[j]);
- if (tx_ring) {
- data[i++] = tx_ring->stats.pkts;
- data[i++] = tx_ring->stats.bytes;
+ if (tx_ring && tx_ring->ring_stats) {
+ data[i++] = tx_ring->ring_stats->stats.pkts;
+ data[i++] = tx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1555,9 +1555,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_rxq(vsi, j) {
rx_ring = READ_ONCE(vsi->rx_rings[j]);
- if (rx_ring) {
- data[i++] = rx_ring->stats.pkts;
- data[i++] = rx_ring->stats.bytes;
+ if (rx_ring && rx_ring->ring_stats) {
+ data[i++] = rx_ring->ring_stats->stats.pkts;
+ data[i++] = rx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index b3baf7c3f910..89f986a75cc8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -908,17 +908,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
return ice_ptype_lkup[ptype];
}
-#define ICE_LINK_SPEED_UNKNOWN 0
-#define ICE_LINK_SPEED_10MBPS 10
-#define ICE_LINK_SPEED_100MBPS 100
-#define ICE_LINK_SPEED_1000MBPS 1000
-#define ICE_LINK_SPEED_2500MBPS 2500
-#define ICE_LINK_SPEED_5000MBPS 5000
-#define ICE_LINK_SPEED_10000MBPS 10000
-#define ICE_LINK_SPEED_20000MBPS 20000
-#define ICE_LINK_SPEED_25000MBPS 25000
-#define ICE_LINK_SPEED_40000MBPS 40000
-#define ICE_LINK_SPEED_50000MBPS 50000
-#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 7276badfa19e..94aa834cd9a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -448,6 +448,49 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
}
/**
+ * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
+ * @vsi: VSI pointer
+ */
+static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+ if (!pf->vsi_stats)
+ return -ENOENT;
+
+ vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
+ if (!vsi_stat)
+ return -ENOMEM;
+
+ vsi_stat->tx_ring_stats =
+ kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->tx_ring_stats)
+ goto err_alloc_tx;
+
+ vsi_stat->rx_ring_stats =
+ kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->rx_ring_stats)
+ goto err_alloc_rx;
+
+ pf->vsi_stats[vsi->idx] = vsi_stat;
+
+ return 0;
+
+err_alloc_rx:
+ kfree(vsi_stat->rx_ring_stats);
+err_alloc_tx:
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+ return -ENOMEM;
+}
+
+/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
@@ -560,6 +603,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
if (vsi->type == ICE_VSI_CTRL && vf)
vf->ctrl_vsi_idx = vsi->idx;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ if (ice_vsi_alloc_stat_arrays(vsi))
+ goto err_rings;
+
goto unlock_pf;
err_rings:
@@ -1536,6 +1584,106 @@ err_out:
}
/**
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
+
+/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1795,11 +1943,15 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
{
struct ice_eth_stats *prev_es, *cur_es;
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_pf *pf = vsi->back;
u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
+ if (ice_is_reset_in_progress(pf->state))
+ vsi->stat_offsets_loaded = false;
+
ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_bytes, &cur_es->rx_bytes);
@@ -2576,6 +2728,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
ice_vsi_map_rings_to_vectors(vsi);
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2614,6 +2770,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2627,6 +2786,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
break;
default:
/* clean up the resources and exit */
@@ -2686,6 +2850,7 @@ unroll_vector_base:
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
+ ice_vsi_free_stats(vsi);
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
@@ -3077,7 +3242,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
ice_vsi_clear_rings(vsi);
-
+ ice_vsi_free_stats(vsi);
ice_vsi_put_qs(vsi);
/* retain SW VSI data structure since it is needed to unregister and
@@ -3205,6 +3370,47 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * @vsi: VSI pointer
+ * @prev_txq: Number of Tx rings before ring reallocation
+ * @prev_rxq: Number of Rx rings before ring reallocation
+ */
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (!prev_txq || !prev_rxq)
+ return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+
+ if (vsi->num_txq < prev_txq) {
+ for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+ }
+
+ if (vsi->num_rxq < prev_rxq) {
+ for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
* @init_vsi: is this an initialization or a reconfigure of the VSI
@@ -3215,10 +3421,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
+ int ret, i, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
enum ice_vsi_type vtype;
struct ice_pf *pf;
- int ret, i;
if (!vsi)
return -EINVAL;
@@ -3237,6 +3443,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+ prev_txq = vsi->num_txq;
+ prev_rxq = vsi->num_rxq;
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (ret)
@@ -3303,7 +3512,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto err_vectors;
+
ice_vsi_map_rings_to_vectors(vsi);
+
+ vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -3340,6 +3555,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->stat_offsets_loaded = false;
break;
case ICE_VSI_CHNL:
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3387,6 +3607,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
+
+ if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
+ goto err_vectors;
+
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
@@ -3728,9 +3952,9 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes
*/
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
- u64_stats_update_end(&tx_ring->syncp);
+ u64_stats_update_begin(&tx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
/**
@@ -3741,9 +3965,9 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
*/
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
- u64_stats_update_end(&rx_ring->syncp);
+ u64_stats_update_begin(&rx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
/**
@@ -3850,33 +4074,11 @@ int ice_clear_dflt_vsi(struct ice_vsi *vsi)
*/
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
- switch (vsi->port_info->phy.link_info.link_speed) {
- case ICE_AQ_LINK_SPEED_100GB:
- return SPEED_100000;
- case ICE_AQ_LINK_SPEED_50GB:
- return SPEED_50000;
- case ICE_AQ_LINK_SPEED_40GB:
- return SPEED_40000;
- case ICE_AQ_LINK_SPEED_25GB:
- return SPEED_25000;
- case ICE_AQ_LINK_SPEED_20GB:
- return SPEED_20000;
- case ICE_AQ_LINK_SPEED_10GB:
- return SPEED_10000;
- case ICE_AQ_LINK_SPEED_5GB:
- return SPEED_5000;
- case ICE_AQ_LINK_SPEED_2500MB:
- return SPEED_2500;
- case ICE_AQ_LINK_SPEED_1000MB:
- return SPEED_1000;
- case ICE_AQ_LINK_SPEED_100MB:
- return SPEED_100;
- case ICE_AQ_LINK_SPEED_10MB:
- return SPEED_10;
- case ICE_AQ_LINK_SPEED_UNKNOWN:
- default:
- return 0;
- }
+ unsigned int link_speed;
+
+ link_speed = vsi->port_info->phy.link_info.link_speed;
+
+ return (int)ice_get_link_speed(fls(link_speed) - 1);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d6f460ff1b72..a21085c0cfee 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -130,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
ice_for_each_txq(vsi, i) {
struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+ struct ice_ring_stats *ring_stats;
if (!tx_ring)
continue;
if (ice_ring_ch_enabled(tx_ring))
continue;
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+
if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
@@ -144,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
- packets = tx_ring->stats.pkts & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt == packets) {
+ packets = ring_stats->stats.pkts & INT_MAX;
+ if (ring_stats->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
@@ -155,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt =
+ ring_stats->tx_stats.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
@@ -2546,13 +2551,20 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring_stats *ring_stats;
struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
-
if (!xdp_ring)
goto free_xdp_rings;
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats) {
+ ice_free_tx_ring(xdp_ring);
+ goto free_xdp_rings;
+ }
+
+ xdp_ring->ring_stats = ring_stats;
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
@@ -2575,9 +2587,13 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
return 0;
free_xdp_rings:
- for (; i >= 0; i--)
- if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ for (; i >= 0; i--) {
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
+ }
return -ENOMEM;
}
@@ -2778,6 +2794,8 @@ free_qmap:
synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
}
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -4756,11 +4774,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ err = -ENOMEM;
+ goto err_init_vsi_unroll;
+ }
+
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_unroll;
+ goto err_init_vsi_stats_unroll;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4941,6 +4966,9 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_stats_unroll:
+ devm_kfree(dev, pf->vsi_stats);
+ pf->vsi_stats = NULL;
err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
@@ -5063,6 +5091,8 @@ static void ice_remove(struct pci_dev *pdev)
continue;
ice_vsi_free_q_vectors(pf->vsi[i]);
}
+ devm_kfree(&pdev->dev, pf->vsi_stats);
+ pf->vsi_stats = NULL;
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
@@ -6380,14 +6410,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (!ring)
+ if (!ring || !ring->ring_stats)
continue;
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
+ ring->ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
+ vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
}
}
@@ -6397,6 +6429,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
+ struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
@@ -6421,12 +6454,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_ring_stats *ring_stats;
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ ring_stats = ring->ring_stats;
+ ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
+ ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
+ vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
}
/* update XDP Tx rings counters */
@@ -6436,10 +6473,28 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_unlock();
- vsi->net_stats.tx_packets = vsi_stats->tx_packets;
- vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
- vsi->net_stats.rx_packets = vsi_stats->rx_packets;
- vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+ net_stats = &vsi->net_stats;
+ stats_prev = &vsi->net_stats_prev;
+
+ /* clear prev counters after reset */
+ if (vsi_stats->tx_packets < stats_prev->tx_packets ||
+ vsi_stats->rx_packets < stats_prev->rx_packets) {
+ stats_prev->tx_packets = 0;
+ stats_prev->tx_bytes = 0;
+ stats_prev->rx_packets = 0;
+ stats_prev->rx_bytes = 0;
+ }
+
+ /* update netdev counters */
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+
+ stats_prev->tx_packets = vsi_stats->tx_packets;
+ stats_prev->tx_bytes = vsi_stats->tx_bytes;
+ stats_prev->rx_packets = vsi_stats->rx_packets;
+ stats_prev->rx_bytes = vsi_stats->rx_bytes;
kfree(vsi_stats);
}
@@ -6501,6 +6556,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
&prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 772b1f566d6e..1f8dd50db524 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2963,16 +2963,18 @@ bool ice_ptp_lock(struct ice_hw *hw)
u32 hw_lock;
int i;
-#define MAX_TRIES 5
+#define MAX_TRIES 15
for (i = 0; i < MAX_TRIES; i++) {
hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
- if (!hw_lock)
- break;
+ if (hw_lock) {
+ /* Somebody is holding the lock */
+ usleep_range(5000, 6000);
+ continue;
+ }
- /* Somebody is holding the lock */
- usleep_range(10000, 20000);
+ break;
}
return !hw_lock;
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 109761c8c858..fd1f8b0ad0ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -156,18 +156,20 @@ ice_repr_sp_stats64(const struct net_device *dev,
u64 pkts, bytes;
tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+ ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
+ tx_ring->ring_stats->stats,
&pkts, &bytes);
stats->rx_packets = pkts;
stats->rx_bytes = bytes;
rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+ ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
+ rx_ring->ring_stats->stats,
&pkts, &bytes);
stats->tx_packets = pkts;
stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
- rx_ring->rx_stats.alloc_buf_failed;
+ stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dbe80e5053a8..086f0b3ab68d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -325,7 +325,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
- ++tx_ring->tx_stats.restart_q;
+ ++tx_ring->ring_stats->tx_stats.restart_q;
}
}
@@ -367,7 +367,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx_stats.prev_pkt = -1;
return 0;
err:
@@ -667,7 +667,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -680,7 +680,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -1091,7 +1091,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- rx_ring->rx_stats.non_eop_descs++;
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
return true;
}
@@ -1222,7 +1222,7 @@ construct_skb:
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
if (rx_buf)
rx_buf->pagecnt_bias++;
break;
@@ -1275,7 +1275,9 @@ construct_skb:
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
- ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
+ if (rx_ring->ring_stats)
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
+ total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1292,15 +1294,25 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
- packets += tx_ring->stats.pkts;
- bytes += tx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
- packets += rx_ring->stats.pkts;
- bytes += rx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = rx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
}
@@ -1549,7 +1561,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
- ++tx_ring->tx_stats.restart_q;
+ ++tx_ring->ring_stats->tx_stats.restart_q;
return 0;
}
@@ -2293,7 +2305,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->tx_stats.tx_linearize++;
+ tx_ring->ring_stats->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
@@ -2304,7 +2316,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->tx_stats.tx_busy++;
+ tx_ring->ring_stats->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 932b5661ec4d..4fd0e5d0a313 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -191,6 +191,16 @@ struct ice_rxq_stats {
u64 alloc_buf_failed;
};
+struct ice_ring_stats {
+ struct rcu_head rcu; /* to avoid race on free */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ice_txq_stats tx_stats;
+ struct ice_rxq_stats rx_stats;
+ };
+};
+
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
@@ -283,9 +293,7 @@ struct ice_rx_ring {
u16 rx_buf_len;
/* stats structs */
- struct ice_rxq_stats rx_stats;
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
+ struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
/* CL4 - 3rd cacheline starts here */
@@ -325,10 +333,8 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
- struct ice_txq_stats tx_stats;
+ struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7ee38d02d1e5..25f04266c668 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -285,7 +285,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index fc8c93fa4455..d4a4001b6e5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,6 +39,24 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
+static const u32 ice_legacy_aq_to_vc_speed[15] = {
+ VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
+ VIRTCHNL_LINK_SPEED_100MB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_10GB,
+ VIRTCHNL_LINK_SPEED_20GB,
+ VIRTCHNL_LINK_SPEED_25GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
+};
+
/**
* ice_conv_link_speed_to_virtchnl
* @adv_link_support: determines the format of the returned link speed
@@ -55,79 +73,17 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
- else
+ if (adv_link_support) {
+ /* convert a BIT() value into an array index */
+ speed = ice_get_link_speed(fls(link_speed) - 1);
+ } else {
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
- }
+ speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
+ }
return speed;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index ec90e594986e..dab3cd5d300e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1621,9 +1621,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
- struct ice_hw *hw;
- u32 rxdid;
- u16 pf_q;
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
@@ -1664,6 +1661,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ u32 rxdid;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -1691,26 +1689,25 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id, i);
goto error_param;
}
- }
- /* VF Rx queue RXDID configuration */
- pf_q = vsi->rxq_map[qpi->rxq.queue_id];
- rxdid = qpi->rxq.rxdid;
- hw = &vsi->back->hw;
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use legacy 32byte descriptor
+ * format. Legacy 16byte descriptor is not supported.
+ * If this RXDID is selected, return error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
- /* If Rx flex desc is supported, select RXDID for Rx queues.
- * Otherwise, use legacy 32byte descriptor format.
- * Legacy 16byte descriptor is not supported. If this RXDID
- * is selected, return error.
- */
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx],
+ rxdid, 0x03, false);
}
-
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x03, false);
}
/* send the response to the VF */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 056c904b83cc..907055b77af0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -24,13 +24,24 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
*/
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
- memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
- sizeof(vsi->rx_rings[q_idx]->rx_stats));
- memset(&vsi->tx_rings[q_idx]->stats, 0,
- sizeof(vsi->tx_rings[q_idx]->stats));
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (ice_is_xdp_ena_vsi(vsi))
- memset(&vsi->xdp_rings[q_idx]->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->stats));
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
/**
@@ -722,7 +733,7 @@ construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}