author    Jacob Keller <jacob.e.keller@intel.com>    2023-06-20 15:18:45 -0700
committer Tony Nguyen <anthony.l.nguyen@intel.com>   2023-07-27 10:56:32 -0700
commit    f3fbda3396f39c9849ea31c19c9b188905b1691d (patch)
tree      c04cc06c06fd75b71adf77e49a93110675eea355 /drivers/net/ethernet/intel/ice/ice_sched.c
parent    9d0cd5d25f7d45bce01bbb3193b54ac24b3a60f3 (diff)
ice: Correctly initialize queue context values
The ice_alloc_lan_q_ctx function allocates the queue context array for a given traffic class. This function uses devm_kcalloc which will zero-allocate the structure. Thus, prior to any queue being set up by ice_ena_vsi_txq, the q_ctx structure will have a q_handle of 0 and a q_teid of 0. These are potentially valid values.

Modify the ice_alloc_lan_q_ctx function to initialize every member of the q_ctx array to have invalid values. Modify ice_dis_vsi_txq to ensure that it assigns q_teid to an invalid value when it assigns q_handle to the invalid value as well.

This will allow other code to check whether the queue context is currently valid before operating on it.

Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Dave Ertman <david.m.ertman@intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
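To make the failure mode concrete, below is a minimal, self-contained user-space sketch of the ambiguity the commit describes and the validity check it enables. It mirrors the ICE_INVAL_Q_HANDLE / ICE_INVAL_TEID sentinels used in the diff further down; the struct layout, the numeric sentinel values, and the q_ctx_is_valid() helper are illustrative assumptions, not the driver's actual code.

/*
 * Standalone sketch (not driver code): shows why a zero-filled queue
 * context is ambiguous and how sentinel values make "never configured"
 * entries detectable. The sentinel names echo the diff below; the numeric
 * values, struct layout and q_ctx_is_valid() helper are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVAL_Q_HANDLE 0xFFFFu      /* assumed all-ones u16 sentinel */
#define INVAL_TEID     0xFFFFFFFFu  /* assumed all-ones u32 sentinel */

struct q_ctx_sketch {
	uint16_t q_handle;
	uint32_t q_teid;
};

/* hypothetical helper: an entry counts as valid only once a queue was set up */
static bool q_ctx_is_valid(const struct q_ctx_sketch *q)
{
	return q->q_handle != INVAL_Q_HANDLE;
}

int main(void)
{
	/* zero-filled, as a kcalloc-style allocation would hand it back */
	struct q_ctx_sketch zeroed = { 0 };

	/* explicitly marked invalid, as the patched allocator now does */
	struct q_ctx_sketch fresh = {
		.q_handle = INVAL_Q_HANDLE,
		.q_teid   = INVAL_TEID,
	};

	/* handle 0 / TEID 0 are potentially valid, so the zeroed entry is
	 * indistinguishable from a configured queue */
	printf("zeroed entry reported valid:   %d\n", q_ctx_is_valid(&zeroed));
	printf("sentinel entry reported valid: %d\n", q_ctx_is_valid(&fresh));
	return 0;
}

With fresh entries filled with sentinels, a caller can skip queue contexts that were never set up instead of mistaking handle 0 / TEID 0 for a live queue, which is the check the commit message says this change enables.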
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index b664d60fd037..79a8972873f1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -569,18 +569,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 {
 	struct ice_vsi_ctx *vsi_ctx;
 	struct ice_q_ctx *q_ctx;
+	u16 idx;
 
 	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
 	if (!vsi_ctx)
 		return -EINVAL;
 	/* allocate LAN queue contexts */
 	if (!vsi_ctx->lan_q_ctx[tc]) {
-		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
-						      new_numqs,
-						      sizeof(*q_ctx),
-						      GFP_KERNEL);
-		if (!vsi_ctx->lan_q_ctx[tc])
+		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+				     sizeof(*q_ctx), GFP_KERNEL);
+		if (!q_ctx)
 			return -ENOMEM;
+
+		for (idx = 0; idx < new_numqs; idx++) {
+			q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+			q_ctx[idx].q_teid = ICE_INVAL_TEID;
+		}
+
+		vsi_ctx->lan_q_ctx[tc] = q_ctx;
 		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
 		return 0;
 	}
@@ -592,9 +598,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 				     sizeof(*q_ctx), GFP_KERNEL);
 		if (!q_ctx)
 			return -ENOMEM;
+
 		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
 		       prev_num * sizeof(*q_ctx));
 		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+
+		for (idx = prev_num; idx < new_numqs; idx++) {
+			q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+			q_ctx[idx].q_teid = ICE_INVAL_TEID;
+		}
+
 		vsi_ctx->lan_q_ctx[tc] = q_ctx;
 		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
 	}