Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
 drivers/net/ethernet/intel/ice/ice_sched.c | 93 +++++++++++++++++++++++++---
 1 file changed, 86 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f097637e405..9f07b6641705 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -596,6 +596,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 }
 
 /**
+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+	struct ice_vsi_ctx *vsi_ctx;
+	struct ice_q_ctx *q_ctx;
+
+	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi_ctx)
+		return ICE_ERR_PARAM;
+	/* allocate RDMA queue contexts */
+	if (!vsi_ctx->rdma_q_ctx[tc]) {
+		vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+						       new_numqs,
+						       sizeof(*q_ctx),
+						       GFP_KERNEL);
+		if (!vsi_ctx->rdma_q_ctx[tc])
+			return ICE_ERR_NO_MEMORY;
+		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+		return 0;
+	}
+	/* num queues are increased, update the queue contexts */
+	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
+		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
+
+		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+				     sizeof(*q_ctx), GFP_KERNEL);
+		if (!q_ctx)
+			return ICE_ERR_NO_MEMORY;
+		memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
+		       prev_num * sizeof(*q_ctx));
+		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
+		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
+		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+	}
+	return 0;
+}
+
+/**
  * ice_aq_rl_profile - performs a rate limiting task
  * @hw: pointer to the HW struct
  * @opcode: opcode for add, query, or remove profile(s)
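
The helper above follows a grow-on-demand pattern: the first request allocates the per-TC queue-context array, and later requests reallocate only when the count grows, copying existing entries forward so queue state survives the resize. Below is a minimal userspace sketch of the same pattern, with calloc()/free() standing in for the device-managed devm_kcalloc()/devm_kfree() and a hypothetical struct q_ctx; only the rdma_q_ctx handling shown in the hunk is modeled.

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the driver's per-queue context. */
struct q_ctx {
	unsigned short q_handle;
	unsigned int q_teid;
};

struct tc_qs {
	struct q_ctx *q_ctx;		/* per-TC queue-context array */
	unsigned short num_entries;	/* current array length */
};

/* Grow-on-demand, mirroring the shape of ice_alloc_rdma_q_ctx():
 * the first call allocates a zeroed array; growth allocates a larger
 * array, copies the old entries, and frees the old array; an equal or
 * smaller request leaves the existing array untouched.
 */
static int alloc_q_ctx(struct tc_qs *tc, unsigned short new_numqs)
{
	struct q_ctx *q_ctx;

	if (!tc->q_ctx) {
		tc->q_ctx = calloc(new_numqs, sizeof(*tc->q_ctx));
		if (!tc->q_ctx)
			return -1;
		tc->num_entries = new_numqs;
		return 0;
	}
	if (new_numqs <= tc->num_entries)
		return 0;

	q_ctx = calloc(new_numqs, sizeof(*q_ctx));
	if (!q_ctx)
		return -1;
	memcpy(q_ctx, tc->q_ctx, tc->num_entries * sizeof(*q_ctx));
	free(tc->q_ctx);
	tc->q_ctx = q_ctx;
	tc->num_entries = new_numqs;
	return 0;
}

Because the allocation is zeroed, entries past the old prev_num start out cleared, matching what devm_kcalloc() guarantees in the driver.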
@@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 	if (!vsi_ctx)
 		return ICE_ERR_PARAM;
 
-	prev_numqs = vsi_ctx->sched.max_lanq[tc];
+	if (owner == ICE_SCHED_NODE_OWNER_LAN)
+		prev_numqs = vsi_ctx->sched.max_lanq[tc];
+	else
+		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
 	/* num queues are not changed or less than the previous number */
 	if (new_numqs <= prev_numqs)
 		return status;
-	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
-	if (status)
-		return status;
+	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
+		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+		if (status)
+			return status;
+	} else {
+		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
+		if (status)
+			return status;
+	}
 
 	if (new_numqs)
 		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
@@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
					       new_num_nodes, owner);
 	if (status)
 		return status;
-	vsi_ctx->sched.max_lanq[tc] = new_numqs;
+	if (owner == ICE_SCHED_NODE_OWNER_LAN)
+		vsi_ctx->sched.max_lanq[tc] = new_numqs;
+	else
+		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
 
 	return 0;
 }
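
With the two hunks above, the resize path in ice_sched_update_vsi_child_nodes() serves both owners, selecting the per-owner bookkeeping by the owner tag: max_lanq[] for LAN, max_rdmaq[] for RDMA, with the matching queue-context allocator picked the same way. A condensed sketch of that dispatch shape follows (the enum, struct, and 8-TC bound are simplified stand-ins; only the ICE_SCHED_NODE_OWNER_* idea and the max_lanq/max_rdmaq names come from the driver):

/* Stand-ins for ICE_SCHED_NODE_OWNER_LAN / _RDMA. */
enum owner { OWNER_LAN, OWNER_RDMA };

struct vsi_sched {
	unsigned short max_lanq[8];	/* per-TC LAN queue high-water marks */
	unsigned short max_rdmaq[8];	/* per-TC RDMA queue high-water marks */
};

static int update_child_nodes(struct vsi_sched *s, enum owner owner,
			      int tc, unsigned short new_numqs)
{
	unsigned short prev_numqs = owner == OWNER_LAN ?
				    s->max_lanq[tc] : s->max_rdmaq[tc];

	/* the node tree only ever grows; equal or smaller requests are a no-op */
	if (new_numqs <= prev_numqs)
		return 0;

	/* ... allocate owner-specific queue contexts, add child nodes ... */

	if (owner == OWNER_LAN)
		s->max_lanq[tc] = new_numqs;
	else
		s->max_rdmaq[tc] = new_numqs;
	return 0;
}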
@@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
+		vsi_ctx->sched.max_rdmaq[tc] = 0;
 	}
 
 	/* update the VSI child nodes */
@@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 		}
 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
 			vsi_ctx->sched.max_lanq[i] = 0;
+		else
+			vsi_ctx->sched.max_rdmaq[i] = 0;
 	}
 	status = 0;
@@ -2686,8 +2745,8 @@ static enum ice_status
 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, unsigned long *tc_bitmap)
 {
-	struct ice_sched_agg_vsi_info *agg_vsi_info;
-	struct ice_sched_agg_info *agg_info;
+	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+	struct ice_sched_agg_info *agg_info, *old_agg_info;
 	enum ice_status status = 0;
 	struct ice_hw *hw = pi->hw;
 	u8 tc;
@@ -2697,6 +2756,20 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
 	agg_info = ice_get_agg_info(hw, agg_id);
 	if (!agg_info)
 		return ICE_ERR_PARAM;
+	/* If the VSI is already part of another aggregator then update
+	 * its VSI info list
+	 */
+	old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+	if (old_agg_info && old_agg_info != agg_info) {
+		struct ice_sched_agg_vsi_info *vtmp;
+
+		list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+					 &old_agg_info->agg_vsi_list,
+					 list_entry)
+			if (old_agg_vsi_info->vsi_handle == vsi_handle)
+				break;
+	}
+
 	/* check if entry already exist */
 	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
 	if (!agg_vsi_info) {
@@ -2721,6 +2794,12 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			break;
 
 		set_bit(tc, agg_vsi_info->tc_bitmap);
+		if (old_agg_vsi_info)
+			clear_bit(tc, old_agg_vsi_info->tc_bitmap);
+	}
+	if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
+		list_del(&old_agg_vsi_info->list_entry);
+		devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
 	}
 	return status;
 }
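
Taken together, the last three hunks turn ice_sched_assoc_vsi_to_agg() from a pure add into a move: the VSI's entry on its previous aggregator is looked up first, each TC claimed by the new aggregator is cleared from that old entry, and once the old entry owns no TCs it is unlinked and freed. Below is a self-contained userspace sketch of that ownership transfer, using a hand-rolled circular doubly-linked list in place of the kernel's list_head and a single unsigned long as the TC bitmap (the patch likewise only tests tc_bitmap[0]); every name here is illustrative.

#include <stdlib.h>

struct agg_vsi_info {
	struct agg_vsi_info *prev, *next;	/* circular list linkage */
	unsigned int vsi_handle;
	unsigned long tc_bitmap;		/* bit N set: VSI owns TC N here */
};

struct agg_info {
	struct agg_vsi_info head;		/* list head sentinel */
};

/* Linear search for the VSI's entry, like the list_for_each_entry_safe()
 * walk in the patch.
 */
static struct agg_vsi_info *find_vsi(struct agg_info *agg, unsigned int vsi)
{
	struct agg_vsi_info *e;

	for (e = agg->head.next; e != &agg->head; e = e->next)
		if (e->vsi_handle == vsi)
			return e;
	return NULL;
}

/* Move the TCs in tc_bitmap to new_entry: each claimed TC is set on the
 * new entry and cleared on the old one, and an old entry left with no
 * TCs is unlinked and freed, mirroring the clear_bit()/list_del()/
 * devm_kfree() sequence in the patch.
 */
static void move_vsi_tcs(struct agg_info *old_agg, unsigned int vsi,
			 struct agg_vsi_info *new_entry,
			 unsigned long tc_bitmap)
{
	struct agg_vsi_info *old = old_agg ? find_vsi(old_agg, vsi) : NULL;
	int tc;

	for (tc = 0; tc < 8; tc++) {
		if (!(tc_bitmap & (1UL << tc)))
			continue;
		new_entry->tc_bitmap |= 1UL << tc;
		if (old)
			old->tc_bitmap &= ~(1UL << tc);
	}
	if (old && !old->tc_bitmap) {
		old->prev->next = old->next;	/* list_del() */
		old->next->prev = old->prev;
		free(old);
	}
}

In the driver the per-TC loop can break early on error; because the old entry is freed only when its bitmap is fully drained, a partial move leaves the untransferred TCs where they were.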