diff options
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.c | 379 | 
1 files changed, 164 insertions, 215 deletions
| diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index eeae199469b6..7cc8aa18a22b 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi,  {  	struct ice_sched_node *root;  	struct ice_hw *hw; -	u16 max_children;  	if (!pi)  		return ICE_ERR_PARAM; @@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,  	if (!root)  		return ICE_ERR_NO_MEMORY; -	max_children = le16_to_cpu(hw->layer_info[0].max_children); -	root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, +	/* coverity[suspicious_sizeof] */ +	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],  				      sizeof(*root), GFP_KERNEL);  	if (!root->children) {  		devm_kfree(ice_hw_to_dev(hw), root); @@ -86,6 +85,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)  }  /** + * ice_aq_query_sched_elems - query scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements returned + * @cd: pointer to command details structure or NULL + * + * Query scheduling elements (0x0404) + */ +static enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, +			 struct ice_aqc_get_elem *buf, u16 buf_size, +			 u16 *elems_ret, struct ice_sq_cd *cd) +{ +	struct ice_aqc_get_cfg_elem *cmd; +	struct ice_aq_desc desc; +	enum ice_status status; + +	cmd = &desc.params.get_update_elem; +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems); +	cmd->num_elem_req = cpu_to_le16(elems_req); +	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); +	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +	if (!status && elems_ret) +		*elems_ret = le16_to_cpu(cmd->num_elem_resp); + +	return status; +} + +/** + * ice_sched_query_elem - query element 
information from hw + * @hw: pointer to the hw struct + * @node_teid: node teid to be queried + * @buf: buffer to element information + * + * This function queries HW element information + */ +static enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, +		     struct ice_aqc_get_elem *buf) +{ +	u16 buf_size, num_elem_ret = 0; +	enum ice_status status; + +	buf_size = sizeof(*buf); +	memset(buf, 0, buf_size); +	buf->generic[0].node_teid = cpu_to_le32(node_teid); +	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, +					  NULL); +	if (status || num_elem_ret != 1) +		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); +	return status; +} + +/**   * ice_sched_add_node - Insert the Tx scheduler node in SW DB   * @pi: port information structure   * @layer: Scheduler layer of the node @@ -98,9 +153,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,  		   struct ice_aqc_txsched_elem_data *info)  {  	struct ice_sched_node *parent; +	struct ice_aqc_get_elem elem;  	struct ice_sched_node *node; +	enum ice_status status;  	struct ice_hw *hw; -	u16 max_children;  	if (!pi)  		return ICE_ERR_PARAM; @@ -117,12 +173,20 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,  		return ICE_ERR_PARAM;  	} +	/* query the current node information from FW  before adding it +	 * to the SW DB +	 */ +	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); +	if (status) +		return status; +  	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);  	if (!node)  		return ICE_ERR_NO_MEMORY; -	max_children = le16_to_cpu(hw->layer_info[layer].max_children); -	if (max_children) { -		node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, +	if (hw->max_children[layer]) { +		/* coverity[suspicious_sizeof] */ +		node->children = devm_kcalloc(ice_hw_to_dev(hw), +					      hw->max_children[layer],  					      sizeof(*node), GFP_KERNEL);  		if (!node->children) {  			devm_kfree(ice_hw_to_dev(hw), node); @@ 
-134,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,  	node->parent = parent;  	node->tx_sched_layer = layer;  	parent->children[parent->num_children++] = node; -	memcpy(&node->info, info, sizeof(*info)); +	memcpy(&node->info, &elem.generic[0], sizeof(node->info));  	return 0;  } @@ -192,14 +256,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,  	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);  	if (!buf)  		return ICE_ERR_NO_MEMORY; +  	buf->hdr.parent_teid = parent->info.node_teid;  	buf->hdr.num_elems = cpu_to_le16(num_nodes);  	for (i = 0; i < num_nodes; i++)  		buf->teid[i] = cpu_to_le32(node_teids[i]); +  	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,  					   &num_groups_removed, NULL);  	if (status || num_groups_removed != 1)  		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); +  	devm_kfree(ice_hw_to_dev(hw), buf);  	return status;  } @@ -532,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,  static void ice_sched_clear_tx_topo(struct ice_port_info *pi)  {  	struct ice_sched_agg_info *agg_info; -	struct ice_sched_vsi_info *vsi_elem;  	struct ice_sched_agg_info *atmp; -	struct ice_sched_vsi_info *tmp;  	struct ice_hw *hw;  	if (!pi) @@ -553,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)  		}  	} -	/* remove the vsi list */ -	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list, -				 list_entry) { -		list_del(&vsi_elem->list_entry); -		devm_kfree(ice_hw_to_dev(hw), vsi_elem); -	} -  	if (pi->root) {  		ice_free_sched_node(pi, pi->root);  		pi->root = NULL; @@ -592,13 +650,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)   */  void ice_sched_cleanup_all(struct ice_hw *hw)  { -	if (!hw || !hw->port_info) +	if (!hw)  		return; -	if (hw->layer_info) +	if (hw->layer_info) {  		devm_kfree(ice_hw_to_dev(hw), hw->layer_info); +		hw->layer_info = NULL; +	} -	ice_sched_clear_port(hw->port_info); 
+	if (hw->port_info) +		ice_sched_clear_port(hw->port_info);  	hw->num_tx_sched_layers = 0;  	hw->num_tx_sched_phys_layers = 0; @@ -607,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)  }  /** - * ice_sched_create_vsi_info_entry - create an empty new VSI entry - * @pi: port information structure - * @vsi_id: VSI Id - * - * This function creates a new VSI entry and adds it to list - */ -static struct ice_sched_vsi_info * -ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ -	struct ice_sched_vsi_info *vsi_elem; - -	if (!pi) -		return NULL; - -	vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem), -				GFP_KERNEL); -	if (!vsi_elem) -		return NULL; - -	list_add(&vsi_elem->list_entry, &pi->vsi_info_list); -	vsi_elem->vsi_id = vsi_id; -	return vsi_elem; -} - -/**   * ice_sched_add_elems - add nodes to hw and SW DB   * @pi: port information structure   * @tc_node: pointer to the branch node @@ -671,9 +707,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  			ICE_AQC_ELEM_VALID_EIR;  		buf->generic[i].data.generic = 0;  		buf->generic[i].data.cir_bw.bw_profile_idx = -			ICE_SCHED_DFLT_RL_PROF_ID; +			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); +		buf->generic[i].data.cir_bw.bw_alloc = +			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);  		buf->generic[i].data.eir_bw.bw_profile_idx = -			ICE_SCHED_DFLT_RL_PROF_ID; +			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); +		buf->generic[i].data.eir_bw.bw_alloc = +			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);  	}  	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, @@ -697,7 +737,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  		teid = le32_to_cpu(buf->generic[i].node_teid);  		new_node = ice_sched_find_node_by_teid(parent, teid); -  		if (!new_node) {  			ice_debug(hw, ICE_DBG_SCHED,  				  "Node is missing for teid =%d\n", teid); @@ -710,7 +749,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  		/* add it to 
previous node sibling pointer */  		/* Note: siblings are not linked across branches */  		prev = ice_sched_get_first_node(hw, tc_node, layer); -  		if (prev && prev != new_node) {  			while (prev->sibling)  				prev = prev->sibling; @@ -760,8 +798,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,  		return ICE_ERR_PARAM;  	/* max children per node per layer */ -	max_child_nodes = -	    le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); +	max_child_nodes = hw->max_children[parent->tx_sched_layer];  	/* current number of children + required nodes exceed max children ? */  	if ((parent->num_children + num_nodes) > max_child_nodes) { @@ -851,78 +888,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)  }  /** - * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer - * @pi: pointer to the port info struct - * @layer: layer number - * - * This function calculates the number of nodes present in the scheduler tree - * including all the branches for a given layer - */ -static u16 -ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) -{ -	struct ice_hw *hw; -	u16 num_nodes = 0; -	u8 i; - -	if (!pi) -		return num_nodes; - -	hw = pi->hw; - -	/* Calculate the number of nodes for all TCs */ -	for (i = 0; i < pi->root->num_children; i++) { -		struct ice_sched_node *tc_node, *node; - -		tc_node = pi->root->children[i]; - -		/* Get the first node */ -		node = ice_sched_get_first_node(hw, tc_node, layer); -		if (!node) -			continue; - -		/* count the siblings */ -		while (node) { -			num_nodes++; -			node = node->sibling; -		} -	} - -	return num_nodes; -} - -/** - * ice_sched_val_max_nodes - check max number of nodes reached or not - * @pi: port information structure - * @new_num_nodes_per_layer: pointer to the new number of nodes array - * - * This function checks whether the scheduler tree layers have enough space to - * add new nodes - */ -static enum ice_status -ice_sched_validate_for_max_nodes(struct 
ice_port_info *pi, -				 u16 *new_num_nodes_per_layer) -{ -	struct ice_hw *hw = pi->hw; -	u8 i, qg_layer; -	u16 num_nodes; - -	qg_layer = ice_sched_get_qgrp_layer(hw); - -	/* walk through all the layers from SW entry point to qgroup layer */ -	for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { -		num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); -		if (num_nodes + new_num_nodes_per_layer[i] > -		    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { -			ice_debug(hw, ICE_DBG_SCHED, -				  "max nodes reached for layer = %d\n", i); -			return ICE_ERR_CFG; -		} -	} -	return 0; -} - -/**   * ice_rm_dflt_leaf_node - remove the default leaf node in the tree   * @pi: port information structure   * @@ -1003,14 +968,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)  	hw = pi->hw;  	/* Query the Default Topology from FW */ -	buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, -			   sizeof(*buf), GFP_KERNEL); +	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);  	if (!buf)  		return ICE_ERR_NO_MEMORY;  	/* Query default scheduling tree topology */ -	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, -				      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, +	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,  				      &num_branches, NULL);  	if (status)  		goto err_init_port; @@ -1075,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)  	pi->port_state = ICE_SCHED_PORT_STATE_READY;  	mutex_init(&pi->sched_lock);  	INIT_LIST_HEAD(&pi->agg_list); -	INIT_LIST_HEAD(&pi->vsi_info_list);  err_init_port:  	if (status && pi->root) { @@ -1097,6 +1059,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)  {  	struct ice_aqc_query_txsched_res_resp *buf;  	enum ice_status status = 0; +	__le16 max_sibl; +	u8 i;  	if (hw->layer_info)  		return status; @@ -1115,7 +1079,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)  	hw->flattened_layers = 
buf->sched_props.flattening_bitmap;  	hw->max_cgds = buf->sched_props.max_pf_cgds; -	 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, +	/* max sibling group size of current layer refers to the max children +	 * of the below layer node. +	 * layer 1 node max children will be layer 2 max sibling group size +	 * layer 2 node max children will be layer 3 max sibling group size +	 * and so on. This array will be populated from root (index 0) to +	 * qgroup layer 7. Leaf node has no children. +	 */ +	for (i = 0; i < hw->num_tx_sched_layers; i++) { +		max_sibl = buf->layer_props[i].max_sibl_grp_sz; +		hw->max_children[i] = le16_to_cpu(max_sibl); +	} + +	hw->layer_info = (struct ice_aqc_layer_props *) +			  devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,  				       (hw->num_tx_sched_layers *  					sizeof(*hw->layer_info)),  				       GFP_KERNEL); @@ -1130,27 +1107,6 @@ sched_query_out:  }  /** - * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id - * @pi: port information structure - * @vsi_id: vsi id - * - * This function retrieves the vsi list for the given vsi id - */ -static struct ice_sched_vsi_info * -ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ -	struct ice_sched_vsi_info *list_elem; - -	if (!pi) -		return NULL; - -	list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry) -		if (list_elem->vsi_id == vsi_id) -			return list_elem; -	return NULL; -} - -/**   * ice_sched_find_node_in_subtree - Find node in part of base node subtree   * @hw: pointer to the hw struct   * @base: pointer to the base node @@ -1186,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,  /**   * ice_sched_get_free_qparent - Get a free lan or rdma q group node   * @pi: port information structure - * @vsi_id: vsi id + * @vsi_handle: software VSI handle   * @tc: branch number   * @owner: lan or rdma   *   * This function retrieves a free lan or rdma q group node   */  struct 
ice_sched_node * -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  			   u8 owner)  {  	struct ice_sched_node *vsi_node, *qgrp_node = NULL; -	struct ice_sched_vsi_info *list_elem; +	struct ice_vsi_ctx *vsi_ctx;  	u16 max_children;  	u8 qgrp_layer;  	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); -	max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); - -	list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id); -	if (!list_elem) -		goto lan_q_exit; - -	vsi_node = list_elem->vsi_node[tc]; +	max_children = pi->hw->max_children[qgrp_layer]; +	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); +	if (!vsi_ctx) +		return NULL; +	vsi_node = vsi_ctx->sched.vsi_node[tc];  	/* validate invalid VSI id */  	if (!vsi_node)  		goto lan_q_exit; @@ -1233,14 +1187,14 @@ lan_q_exit:   * ice_sched_get_vsi_node - Get a VSI node based on VSI id   * @hw: pointer to the hw struct   * @tc_node: pointer to the TC node - * @vsi_id: VSI id + * @vsi_handle: software VSI handle   *   * This function retrieves a VSI node for a given VSI id from a given   * TC branch   */  static struct ice_sched_node *  ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, -		       u16 vsi_id) +		       u16 vsi_handle)  {  	struct ice_sched_node *node;  	u8 vsi_layer; @@ -1250,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,  	/* Check whether it already exists */  	while (node) { -		if (node->vsi_id == vsi_id) +		if (node->vsi_handle == vsi_handle)  			return node;  		node = node->sibling;  	} @@ -1278,10 +1232,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)  	/* calculate num nodes from q group to VSI layer */  	for (i = qgl; i > vsil; i--) { -		u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); -  		/* round to the next integer if there is a remainder */ -		num = DIV_ROUND_UP(num, 
max_children); +		num = DIV_ROUND_UP(num, hw->max_children[i]);  		/* need at least one node */  		num_nodes[i] = num ? num : 1; @@ -1291,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)  /**   * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree   * @pi: port information structure - * @vsi_id: VSI id + * @vsi_handle: software VSI handle   * @tc_node: pointer to the TC node   * @num_nodes: pointer to the num nodes that needs to be added per layer   * @owner: node owner (lan or rdma) @@ -1300,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)   * lan and rdma separately.   */  static enum ice_status -ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,  			      struct ice_sched_node *tc_node, u16 *num_nodes,  			      u8 owner)  { @@ -1311,16 +1263,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,  	u16 num_added = 0;  	u8 i, qgl, vsil; -	status = ice_sched_validate_for_max_nodes(pi, num_nodes); -	if (status) -		return status; -  	qgl = ice_sched_get_qgrp_layer(hw);  	vsil = ice_sched_get_vsi_layer(hw); -	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); +	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);  	for (i = vsil + 1; i <= qgl; i++) {  		if (!parent)  			return ICE_ERR_CFG; +  		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,  						      num_nodes[i],  						      &first_node_teid, @@ -1398,8 +1347,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,  				 struct ice_sched_node *tc_node, u16 *num_nodes)  {  	struct ice_sched_node *node; -	u16 max_child; -	u8 i, vsil; +	u8 vsil; +	int i;  	vsil = ice_sched_get_vsi_layer(hw);  	for (i = vsil; i >= hw->sw_entry_point_layer; i--) @@ -1412,12 +1361,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,  			/* If intermediate nodes are reached max children  			 * then add a new 
one.  			 */ -			node = ice_sched_get_first_node(hw, tc_node, i); -			max_child = le16_to_cpu(hw->layer_info[i].max_children); - +			node = ice_sched_get_first_node(hw, tc_node, (u8)i);  			/* scan all the siblings */  			while (node) { -				if (node->num_children < max_child) +				if (node->num_children < hw->max_children[i])  					break;  				node = node->sibling;  			} @@ -1431,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,  /**   * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree   * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle   * @tc_node: pointer to TC node   * @num_nodes: pointer to num nodes array   * @@ -1439,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,   * VSI, its parent and intermediate nodes in below layers   */  static enum ice_status -ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,  				struct ice_sched_node *tc_node, u16 *num_nodes)  {  	struct ice_sched_node *parent = tc_node; @@ -1451,10 +1398,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,  	if (!pi)  		return ICE_ERR_PARAM; -	status = ice_sched_validate_for_max_nodes(pi, num_nodes); -	if (status) -		return status; -  	vsil = ice_sched_get_vsi_layer(pi->hw);  	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {  		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, @@ -1477,21 +1420,22 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,  			return ICE_ERR_CFG;  		if (i == vsil) -			parent->vsi_id = vsi_id; +			parent->vsi_handle = vsi_handle;  	} +  	return 0;  }  /**   * ice_sched_add_vsi_to_topo - add a new VSI into tree   * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle   * @tc: TC number   *   * This function adds a new VSI into scheduler tree   */  static enum ice_status 
-ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)  {  	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };  	struct ice_sched_node *tc_node; @@ -1505,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)  	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);  	/* add vsi supported nodes to tc subtree */ -	return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes); +	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, +					       num_nodes);  }  /**   * ice_sched_update_vsi_child_nodes - update VSI child nodes   * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle   * @tc: TC number   * @new_numqs: new number of max queues   * @owner: owner of this subtree @@ -1519,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)   * This function updates the VSI child nodes based on the number of queues   */  static enum ice_status -ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, -				 u16 new_numqs, u8 owner) +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, +				 u8 tc, u16 new_numqs, u8 owner)  {  	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };  	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };  	struct ice_sched_node *vsi_node;  	struct ice_sched_node *tc_node; -	struct ice_sched_vsi_info *vsi; +	struct ice_vsi_ctx *vsi_ctx;  	enum ice_status status = 0;  	struct ice_hw *hw = pi->hw;  	u16 prev_numqs; @@ -1536,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,  	if (!tc_node)  		return ICE_ERR_CFG; -	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); +	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);  	if (!vsi_node)  		return ICE_ERR_CFG; -	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); -	if (!vsi) -		return 
ICE_ERR_CFG; +	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); +	if (!vsi_ctx) +		return ICE_ERR_PARAM;  	if (owner == ICE_SCHED_NODE_OWNER_LAN) -		prev_numqs = vsi->max_lanq[tc]; +		prev_numqs = vsi_ctx->sched.max_lanq[tc];  	else  		return ICE_ERR_PARAM; @@ -1570,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,  		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)  			new_num_nodes[i] -= prev_num_nodes[i]; -		status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node, +		status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,  						       new_num_nodes, owner);  		if (status)  			return status;  	} -	vsi->max_lanq[tc] = new_numqs; +	vsi_ctx->sched.max_lanq[tc] = new_numqs;  	return status;  } @@ -1584,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,  /**   * ice_sched_cfg_vsi - configure the new/exisiting VSI   * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle   * @tc: TC number   * @maxqs: max number of queues   * @owner: lan or rdma @@ -1595,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,   * disabled then suspend the VSI if it is not already.   
*/  enum ice_status -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,  		  u8 owner, bool enable)  {  	struct ice_sched_node *vsi_node, *tc_node; -	struct ice_sched_vsi_info *vsi; +	struct ice_vsi_ctx *vsi_ctx;  	enum ice_status status = 0;  	struct ice_hw *hw = pi->hw;  	tc_node = ice_sched_get_tc_node(pi, tc);  	if (!tc_node)  		return ICE_ERR_PARAM; - -	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); -	if (!vsi) -		vsi = ice_sched_create_vsi_info_entry(pi, vsi_id); -	if (!vsi) -		return ICE_ERR_NO_MEMORY; - -	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); +	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); +	if (!vsi_ctx) +		return ICE_ERR_PARAM; +	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);  	/* suspend the VSI if tc is not enabled */  	if (!enable) { @@ -1630,18 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,  	/* TC is enabled, if it is a new VSI then add it to the tree */  	if (!vsi_node) { -		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); +		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);  		if (status)  			return status; -		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + +		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);  		if (!vsi_node)  			return ICE_ERR_CFG; -		vsi->vsi_node[tc] = vsi_node; + +		vsi_ctx->sched.vsi_node[tc] = vsi_node;  		vsi_node->in_use = true; +		/* invalidate the max queues whenever VSI gets added first time +		 * into the scheduler tree (boot or after reset). We need to +		 * recreate the child nodes all the time in these cases. +		 */ +		vsi_ctx->sched.max_lanq[tc] = 0;  	}  	/* update the VSI child nodes */ -	status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner); +	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, +						  owner);  	if (status)  		return status; |