diff options
| author | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /drivers/net/ethernet/intel/ice/ice_lib.c | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c | 1019 | 
1 file changed, 459 insertions(+), 560 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 94aa834cd9a6..0f52ea38b6f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)  /**   * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI   * @vsi: the VSI being configured - * @vf: the VF associated with this VSI, if any   *   * Return 0 on success and a negative value on error   */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi)  {  	enum ice_vsi_type vsi_type = vsi->type;  	struct ice_pf *pf = vsi->back; +	struct ice_vf *vf = vsi->vf;  	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))  		return; @@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)  }  /** - * ice_vsi_delete - delete a VSI from the switch + * ice_vsi_delete_from_hw - delete a VSI from the switch   * @vsi: pointer to VSI being removed   */ -void ice_vsi_delete(struct ice_vsi *vsi) +static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)  {  	struct ice_pf *pf = vsi->back;  	struct ice_vsi_ctx *ctxt; @@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)  }  /** - * ice_vsi_clear - clean up and deallocate the provided VSI + * ice_vsi_free_stats - Free the ring statistics structures + * @vsi: VSI pointer + */ +static void ice_vsi_free_stats(struct ice_vsi *vsi) +{ +	struct ice_vsi_stats *vsi_stat; +	struct ice_pf *pf = vsi->back; +	int i; + +	if (vsi->type == ICE_VSI_CHNL) +		return; +	if (!pf->vsi_stats) +		return; + +	vsi_stat = pf->vsi_stats[vsi->idx]; +	if (!vsi_stat) +		return; + +	ice_for_each_alloc_txq(vsi, i) { +		if (vsi_stat->tx_ring_stats[i]) { +			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); +			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); +		} +	} + +	ice_for_each_alloc_rxq(vsi, i) { +		if 
(vsi_stat->rx_ring_stats[i]) { +			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); +			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); +		} +	} + +	kfree(vsi_stat->tx_ring_stats); +	kfree(vsi_stat->rx_ring_stats); +	kfree(vsi_stat); +	pf->vsi_stats[vsi->idx] = NULL; +} + +/** + * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI + * @vsi: VSI which is having stats allocated + */ +static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) +{ +	struct ice_ring_stats **tx_ring_stats; +	struct ice_ring_stats **rx_ring_stats; +	struct ice_vsi_stats *vsi_stats; +	struct ice_pf *pf = vsi->back; +	u16 i; + +	vsi_stats = pf->vsi_stats[vsi->idx]; +	tx_ring_stats = vsi_stats->tx_ring_stats; +	rx_ring_stats = vsi_stats->rx_ring_stats; + +	/* Allocate Tx ring stats */ +	ice_for_each_alloc_txq(vsi, i) { +		struct ice_ring_stats *ring_stats; +		struct ice_tx_ring *ring; + +		ring = vsi->tx_rings[i]; +		ring_stats = tx_ring_stats[i]; + +		if (!ring_stats) { +			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); +			if (!ring_stats) +				goto err_out; + +			WRITE_ONCE(tx_ring_stats[i], ring_stats); +		} + +		ring->ring_stats = ring_stats; +	} + +	/* Allocate Rx ring stats */ +	ice_for_each_alloc_rxq(vsi, i) { +		struct ice_ring_stats *ring_stats; +		struct ice_rx_ring *ring; + +		ring = vsi->rx_rings[i]; +		ring_stats = rx_ring_stats[i]; + +		if (!ring_stats) { +			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); +			if (!ring_stats) +				goto err_out; + +			WRITE_ONCE(rx_ring_stats[i], ring_stats); +		} + +		ring->ring_stats = ring_stats; +	} + +	return 0; + +err_out: +	ice_vsi_free_stats(vsi); +	return -ENOMEM; +} + +/** + * ice_vsi_free - clean up and deallocate the provided VSI   * @vsi: pointer to VSI being cleared   *   * This deallocates the VSI's queue resources, removes it from the PF's   * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure   */ -int ice_vsi_clear(struct ice_vsi *vsi) +static void 
ice_vsi_free(struct ice_vsi *vsi)  {  	struct ice_pf *pf = NULL;  	struct device *dev; -	if (!vsi) -		return 0; - -	if (!vsi->back) -		return -EINVAL; +	if (!vsi || !vsi->back) +		return;  	pf = vsi->back;  	dev = ice_pf_to_dev(pf);  	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {  		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); -		return -EINVAL; +		return;  	}  	mutex_lock(&pf->sw_mutex);  	/* updates the PF for this cleared VSI */  	pf->vsi[vsi->idx] = NULL; -	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) -		pf->next_vsi = vsi->idx; -	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf) -		pf->next_vsi = vsi->idx; +	pf->next_vsi = vsi->idx; +	ice_vsi_free_stats(vsi);  	ice_vsi_free_arrays(vsi);  	mutex_unlock(&pf->sw_mutex);  	devm_kfree(dev, vsi); +} -	return 0; +void ice_vsi_delete(struct ice_vsi *vsi) +{ +	ice_vsi_delete_from_hw(vsi); +	ice_vsi_free(vsi);  }  /** @@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)  	if (!pf->vsi_stats)  		return -ENOENT; +	if (pf->vsi_stats[vsi->idx]) +	/* realloc will happen in rebuild path */ +		return 0; +  	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);  	if (!vsi_stat)  		return -ENOMEM; @@ -491,128 +592,93 @@ err_alloc_tx:  }  /** - * ice_vsi_alloc - Allocates the next available struct VSI in the PF - * @pf: board private structure - * @vsi_type: type of VSI + * ice_vsi_alloc_def - set default values for already allocated VSI + * @vsi: ptr to VSI   * @ch: ptr to channel - * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL - * - * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL, - * it may be NULL in the case there is no association with a VF. For - * ICE_VSI_VF the VF pointer *must not* be NULL. - * - * returns a pointer to a VSI on success, NULL on failure.   
*/ -static struct ice_vsi * -ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, -	      struct ice_channel *ch, struct ice_vf *vf) +static int +ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)  { -	struct device *dev = ice_pf_to_dev(pf); -	struct ice_vsi *vsi = NULL; - -	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) -		return NULL; - -	/* Need to protect the allocation of the VSIs at the PF level */ -	mutex_lock(&pf->sw_mutex); - -	/* If we have already allocated our maximum number of VSIs, -	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index -	 * is available to be populated -	 */ -	if (pf->next_vsi == ICE_NO_VSI) { -		dev_dbg(dev, "out of VSI slots!\n"); -		goto unlock_pf; +	if (vsi->type != ICE_VSI_CHNL) { +		ice_vsi_set_num_qs(vsi); +		if (ice_vsi_alloc_arrays(vsi)) +			return -ENOMEM;  	} -	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); -	if (!vsi) -		goto unlock_pf; - -	vsi->type = vsi_type; -	vsi->back = pf; -	set_bit(ICE_VSI_DOWN, vsi->state); - -	if (vsi_type == ICE_VSI_VF) -		ice_vsi_set_num_qs(vsi, vf); -	else if (vsi_type != ICE_VSI_CHNL) -		ice_vsi_set_num_qs(vsi, NULL); -  	switch (vsi->type) {  	case ICE_VSI_SWITCHDEV_CTRL: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -  		/* Setup eswitch MSIX irq handler for VSI */  		vsi->irq_handler = ice_eswitch_msix_clean_rings;  		break;  	case ICE_VSI_PF: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -  		/* Setup default MSIX irq handler for VSI */  		vsi->irq_handler = ice_msix_clean_rings;  		break;  	case ICE_VSI_CTRL: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -  		/* Setup ctrl VSI MSIX irq handler */  		vsi->irq_handler = ice_msix_clean_ctrl_vsi; - -		/* For the PF control VSI this is NULL, for the VF control VSI -		 * this will be the first VF to allocate it. 
-		 */ -		vsi->vf = vf; -		break; -	case ICE_VSI_VF: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -		vsi->vf = vf;  		break;  	case ICE_VSI_CHNL:  		if (!ch) -			goto err_rings; +			return -EINVAL; +  		vsi->num_rxq = ch->num_rxq;  		vsi->num_txq = ch->num_txq;  		vsi->next_base_q = ch->base_q;  		break; +	case ICE_VSI_VF:  	case ICE_VSI_LB: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings;  		break;  	default: -		dev_warn(dev, "Unknown VSI type %d\n", vsi->type); -		goto unlock_pf; +		ice_vsi_free_arrays(vsi); +		return -EINVAL;  	} -	if (vsi->type == ICE_VSI_CTRL && !vf) { -		/* Use the last VSI slot as the index for PF control VSI */ -		vsi->idx = pf->num_alloc_vsi - 1; -		pf->ctrl_vsi_idx = vsi->idx; -		pf->vsi[vsi->idx] = vsi; -	} else { -		/* fill slot and make note of the index */ -		vsi->idx = pf->next_vsi; -		pf->vsi[pf->next_vsi] = vsi; +	return 0; +} + +/** + * ice_vsi_alloc - Allocates the next available struct VSI in the PF + * @pf: board private structure + * + * Reserves a VSI index from the PF and allocates an empty VSI structure + * without a type. The VSI structure must later be initialized by calling + * ice_vsi_cfg(). + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); +	struct ice_vsi *vsi = NULL; -		/* prepare pf->next_vsi for next use */ -		pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, -						 pf->next_vsi); +	/* Need to protect the allocation of the VSIs at the PF level */ +	mutex_lock(&pf->sw_mutex); + +	/* If we have already allocated our maximum number of VSIs, +	 * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index +	 * is available to be populated +	 */ +	if (pf->next_vsi == ICE_NO_VSI) { +		dev_dbg(dev, "out of VSI slots!\n"); +		goto unlock_pf;  	} -	if (vsi->type == ICE_VSI_CTRL && vf) -		vf->ctrl_vsi_idx = vsi->idx; +	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); +	if (!vsi) +		goto unlock_pf; -	/* allocate memory for Tx/Rx ring stat pointers */ -	if (ice_vsi_alloc_stat_arrays(vsi)) -		goto err_rings; +	vsi->back = pf; +	set_bit(ICE_VSI_DOWN, vsi->state); -	goto unlock_pf; +	/* fill slot and make note of the index */ +	vsi->idx = pf->next_vsi; +	pf->vsi[pf->next_vsi] = vsi; + +	/* prepare pf->next_vsi for next use */ +	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, +					 pf->next_vsi); -err_rings: -	devm_kfree(dev, vsi); -	vsi = NULL;  unlock_pf:  	mutex_unlock(&pf->sw_mutex);  	return vsi; @@ -1177,12 +1243,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)  /**   * ice_vsi_init - Create and initialize a VSI   * @vsi: the VSI being configured - * @init_vsi: is this call creating a VSI + * @vsi_flags: VSI configuration flags + * + * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to + * reconfigure an existing context.   *   * This initializes a VSI context depending on the VSI type to be added and   * passes it down to the add_vsi aq command to create a new VSI.   
*/ -static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) +static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)  {  	struct ice_pf *pf = vsi->back;  	struct ice_hw *hw = &pf->hw; @@ -1244,7 +1313,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  		/* if updating VSI context, make sure to set valid_section:  		 * to indicate which section of VSI context being updated  		 */ -		if (!init_vsi) +		if (!(vsi_flags & ICE_VSI_FLAG_INIT))  			ctxt->info.valid_sections |=  				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);  	} @@ -1257,7 +1326,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  		if (ret)  			goto out; -		if (!init_vsi) /* means VSI being updated */ +		if (!(vsi_flags & ICE_VSI_FLAG_INIT)) +			/* means VSI being updated */  			/* must to indicate which section of VSI context are  			 * being modified  			 */ @@ -1272,7 +1342,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);  	} -	if (init_vsi) { +	if (vsi_flags & ICE_VSI_FLAG_INIT) {  		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);  		if (ret) {  			dev_err(dev, "Add VSI failed, err %d\n", ret); @@ -1436,7 +1506,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)   * ice_vsi_setup_vector_base - Set up the base vector for the given VSI   * @vsi: ptr to the VSI   * - * This should only be called after ice_vsi_alloc() which allocates the + * This should only be called after ice_vsi_alloc_def() which allocates the   * corresponding SW VSI structure and initializes num_queue_pairs for the   * newly allocated VSI.   
* @@ -1584,106 +1654,6 @@ err_out:  }  /** - * ice_vsi_free_stats - Free the ring statistics structures - * @vsi: VSI pointer - */ -static void ice_vsi_free_stats(struct ice_vsi *vsi) -{ -	struct ice_vsi_stats *vsi_stat; -	struct ice_pf *pf = vsi->back; -	int i; - -	if (vsi->type == ICE_VSI_CHNL) -		return; -	if (!pf->vsi_stats) -		return; - -	vsi_stat = pf->vsi_stats[vsi->idx]; -	if (!vsi_stat) -		return; - -	ice_for_each_alloc_txq(vsi, i) { -		if (vsi_stat->tx_ring_stats[i]) { -			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); -			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); -		} -	} - -	ice_for_each_alloc_rxq(vsi, i) { -		if (vsi_stat->rx_ring_stats[i]) { -			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); -			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); -		} -	} - -	kfree(vsi_stat->tx_ring_stats); -	kfree(vsi_stat->rx_ring_stats); -	kfree(vsi_stat); -	pf->vsi_stats[vsi->idx] = NULL; -} - -/** - * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI - * @vsi: VSI which is having stats allocated - */ -static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) -{ -	struct ice_ring_stats **tx_ring_stats; -	struct ice_ring_stats **rx_ring_stats; -	struct ice_vsi_stats *vsi_stats; -	struct ice_pf *pf = vsi->back; -	u16 i; - -	vsi_stats = pf->vsi_stats[vsi->idx]; -	tx_ring_stats = vsi_stats->tx_ring_stats; -	rx_ring_stats = vsi_stats->rx_ring_stats; - -	/* Allocate Tx ring stats */ -	ice_for_each_alloc_txq(vsi, i) { -		struct ice_ring_stats *ring_stats; -		struct ice_tx_ring *ring; - -		ring = vsi->tx_rings[i]; -		ring_stats = tx_ring_stats[i]; - -		if (!ring_stats) { -			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); -			if (!ring_stats) -				goto err_out; - -			WRITE_ONCE(tx_ring_stats[i], ring_stats); -		} - -		ring->ring_stats = ring_stats; -	} - -	/* Allocate Rx ring stats */ -	ice_for_each_alloc_rxq(vsi, i) { -		struct ice_ring_stats *ring_stats; -		struct ice_rx_ring *ring; - -		ring = vsi->rx_rings[i]; -		ring_stats = rx_ring_stats[i]; - -		
if (!ring_stats) { -			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); -			if (!ring_stats) -				goto err_out; - -			 WRITE_ONCE(rx_ring_stats[i], ring_stats); -		} - -		ring->ring_stats = ring_stats; -	} - -	return 0; - -err_out: -	ice_vsi_free_stats(vsi); -	return -ENOMEM; -} - -/**   * ice_vsi_manage_rss_lut - disable/enable RSS   * @vsi: the VSI being changed   * @ena: boolean value indicating if this is an enable or disable request @@ -1992,8 +1962,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)  void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)  {  	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { -		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; -		vsi->rx_buf_len = ICE_RXBUF_2048; +		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; +		vsi->rx_buf_len = ICE_RXBUF_1664;  #if (PAGE_SIZE < 8192)  	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&  		   (vsi->netdev->mtu <= ETH_DATA_LEN)) { @@ -2002,11 +1972,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)  #endif  	} else {  		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; -#if (PAGE_SIZE < 8192)  		vsi->rx_buf_len = ICE_RXBUF_3072; -#else -		vsi->rx_buf_len = ICE_RXBUF_2048; -#endif  	}  } @@ -2160,7 +2126,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)  	ice_for_each_rxq(vsi, i)  		ice_tx_xsk_pool(vsi, i); -	return ret; +	return 0;  }  /** @@ -2645,54 +2611,99 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)  }  /** - * ice_vsi_setup - Set up a VSI by a given type - * @pf: board private structure - * @pi: pointer to the port_info instance - * @vsi_type: VSI type - * @vf: pointer to VF to which this VSI connects. This field is used primarily - *      for the ICE_VSI_VF type. Other VSI types should pass NULL. - * @ch: ptr to channel - * - * This allocates the sw VSI structure and its queue resources. 
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource + * @pf: pointer to PF structure + * @vsi: the VSI to free resources for   * - * Returns pointer to the successfully allocated and configured VSI sw struct on - * success, NULL on failure. + * Check if the VF control VSI resource is still in use. If no VF is using it + * any more, release the VSI resource. Otherwise, leave it to be cleaned up + * once no other VF uses it.   */ -struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, -	      enum ice_vsi_type vsi_type, struct ice_vf *vf, -	      struct ice_channel *ch) +static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi) +{ +	struct ice_vf *vf; +	unsigned int bkt; + +	rcu_read_lock(); +	ice_for_each_vf_rcu(pf, bkt, vf) { +		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { +			rcu_read_unlock(); +			return; +		} +	} +	rcu_read_unlock(); + +	/* No other VFs left that have control VSI. It is now safe to reclaim +	 * SW interrupts back to the common pool. 
+	 */ +	ice_free_res(pf->irq_tracker, vsi->base_vector, +		     ICE_RES_VF_CTRL_VEC_ID); +	pf->num_avail_sw_msix += vsi->num_q_vectors; +} + +static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)  {  	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };  	struct device *dev = ice_pf_to_dev(pf); -	struct ice_vsi *vsi;  	int ret, i; -	if (vsi_type == ICE_VSI_CHNL) -		vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL); -	else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) -		vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf); -	else -		vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL); +	/* configure VSI nodes based on number of queues and TC's */ +	ice_for_each_traffic_class(i) { +		if (!(vsi->tc_cfg.ena_tc & BIT(i))) +			continue; -	if (!vsi) { -		dev_err(dev, "could not allocate VSI\n"); -		return NULL; +		if (vsi->type == ICE_VSI_CHNL) { +			if (!vsi->alloc_txq && vsi->num_txq) +				max_txqs[i] = vsi->num_txq; +			else +				max_txqs[i] = pf->num_lan_tx; +		} else { +			max_txqs[i] = vsi->alloc_txq; +		}  	} -	vsi->port_info = pi; +	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); +	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, +			      max_txqs); +	if (ret) { +		dev_err(dev, "VSI %d failed lan queue config, error %d\n", +			vsi->vsi_num, ret); +		return ret; +	} + +	return 0; +} + +/** + * ice_vsi_cfg_def - configure default VSI based on the type + * @vsi: pointer to VSI + * @params: the parameters to configure this VSI with + */ +static int +ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ +	struct device *dev = ice_pf_to_dev(vsi->back); +	struct ice_pf *pf = vsi->back; +	int ret; +  	vsi->vsw = pf->first_sw; -	if (vsi->type == ICE_VSI_PF) -		vsi->ethtype = ETH_P_PAUSE; + +	ret = ice_vsi_alloc_def(vsi, params->ch); +	if (ret) +		return ret; + +	/* allocate memory for Tx/Rx ring stat pointers */ +	ret = ice_vsi_alloc_stat_arrays(vsi); +	if (ret) +		goto unroll_vsi_alloc;  	ice_alloc_fd_res(vsi); -	if 
(vsi_type != ICE_VSI_CHNL) { -		if (ice_vsi_get_qs(vsi)) { -			dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", -				vsi->idx); -			goto unroll_vsi_alloc; -		} +	ret = ice_vsi_get_qs(vsi); +	if (ret) { +		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", +			vsi->idx); +		goto unroll_vsi_alloc_stat;  	}  	/* set RSS capabilities */ @@ -2702,7 +2713,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  	ice_vsi_set_tc_cfg(vsi);  	/* create the VSI */ -	ret = ice_vsi_init(vsi, true); +	ret = ice_vsi_init(vsi, params->flags);  	if (ret)  		goto unroll_get_qs; @@ -2733,6 +2744,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  			goto unroll_vector_base;  		ice_vsi_map_rings_to_vectors(vsi); +		if (ice_is_xdp_ena_vsi(vsi)) { +			ret = ice_vsi_determine_xdp_res(vsi); +			if (ret) +				goto unroll_vector_base; +			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); +			if (ret) +				goto unroll_vector_base; +		}  		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */  		if (vsi->type != ICE_VSI_CTRL) @@ -2794,32 +2813,159 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  		break;  	default:  		/* clean up the resources and exit */ +		ret = -EINVAL;  		goto unroll_vsi_init;  	} -	/* configure VSI nodes based on number of queues and TC's */ -	ice_for_each_traffic_class(i) { -		if (!(vsi->tc_cfg.ena_tc & BIT(i))) -			continue; +	return 0; -		if (vsi->type == ICE_VSI_CHNL) { -			if (!vsi->alloc_txq && vsi->num_txq) -				max_txqs[i] = vsi->num_txq; -			else -				max_txqs[i] = pf->num_lan_tx; +unroll_vector_base: +	/* reclaim SW interrupts back to the common pool */ +	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); +	pf->num_avail_sw_msix += vsi->num_q_vectors; +unroll_alloc_q_vector: +	ice_vsi_free_q_vectors(vsi); +unroll_vsi_init: +	ice_vsi_delete_from_hw(vsi); +unroll_get_qs: +	ice_vsi_put_qs(vsi); +unroll_vsi_alloc_stat: +	ice_vsi_free_stats(vsi); +unroll_vsi_alloc: +	ice_vsi_free_arrays(vsi); +	
return ret; +} + +/** + * ice_vsi_cfg - configure a previously allocated VSI + * @vsi: pointer to VSI + * @params: parameters used to configure this VSI + */ +int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ +	struct ice_pf *pf = vsi->back; +	int ret; + +	if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) +		return -EINVAL; + +	vsi->type = params->type; +	vsi->port_info = params->pi; + +	/* For VSIs which don't have a connected VF, this will be NULL */ +	vsi->vf = params->vf; + +	ret = ice_vsi_cfg_def(vsi, params); +	if (ret) +		return ret; + +	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); +	if (ret) +		ice_vsi_decfg(vsi); + +	if (vsi->type == ICE_VSI_CTRL) { +		if (vsi->vf) { +			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); +			vsi->vf->ctrl_vsi_idx = vsi->idx;  		} else { -			max_txqs[i] = vsi->alloc_txq; +			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); +			pf->ctrl_vsi_idx = vsi->idx;  		}  	} -	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); -	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, -			      max_txqs); -	if (ret) { -		dev_err(dev, "VSI %d failed lan queue config, error %d\n", -			vsi->vsi_num, ret); -		goto unroll_clear_rings; +	return ret; +} + +/** + * ice_vsi_decfg - remove all VSI configuration + * @vsi: pointer to VSI + */ +void ice_vsi_decfg(struct ice_vsi *vsi) +{ +	struct ice_pf *pf = vsi->back; +	int err; + +	/* The Rx rule will only exist to remove if the LLDP FW +	 * engine is currently stopped +	 */ +	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && +	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) +		ice_cfg_sw_lldp(vsi, false, false); + +	ice_fltr_remove_all(vsi); +	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); +	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); +	if (err) +		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", +			vsi->vsi_num, err); + +	if (ice_is_xdp_ena_vsi(vsi)) +		/* return value check can be skipped here, it 
always returns +		 * 0 if reset is in progress +		 */ +		ice_destroy_xdp_rings(vsi); + +	ice_vsi_clear_rings(vsi); +	ice_vsi_free_q_vectors(vsi); +	ice_vsi_put_qs(vsi); +	ice_vsi_free_arrays(vsi); + +	/* SR-IOV determines needed MSIX resources all at once instead of per +	 * VSI since when VFs are spawned we know how many VFs there are and how +	 * many interrupts each VF needs. SR-IOV MSIX resources are also +	 * cleared in the same manner. +	 */ +	if (vsi->type == ICE_VSI_CTRL && vsi->vf) { +		ice_free_vf_ctrl_res(pf, vsi); +	} else if (vsi->type != ICE_VSI_VF) { +		/* reclaim SW interrupts back to the common pool */ +		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); +		pf->num_avail_sw_msix += vsi->num_q_vectors; +		vsi->base_vector = 0; +	} + +	if (vsi->type == ICE_VSI_VF && +	    vsi->agg_node && vsi->agg_node->valid) +		vsi->agg_node->num_vsis--; +	if (vsi->agg_node) { +		vsi->agg_node->valid = false; +		vsi->agg_node->agg_id = 0;  	} +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @params: parameters to use when creating the VSI + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) +{ +	struct device *dev = ice_pf_to_dev(pf); +	struct ice_vsi *vsi; +	int ret; + +	/* ice_vsi_setup can only initialize a new VSI, and we must have +	 * a port_info structure for it. 
+	 */ +	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || +	    WARN_ON(!params->pi)) +		return NULL; + +	vsi = ice_vsi_alloc(pf); +	if (!vsi) { +		dev_err(dev, "could not allocate VSI\n"); +		return NULL; +	} + +	ret = ice_vsi_cfg(vsi, params); +	if (ret) +		goto err_vsi_cfg;  	/* Add switch rule to drop all Tx Flow Control Frames, of look up  	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending @@ -2830,34 +2976,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  	 * be dropped so that VFs cannot send LLDP packets to reconfig DCB  	 * settings in the HW.  	 */ -	if (!ice_is_safe_mode(pf)) -		if (vsi->type == ICE_VSI_PF) { -			ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, -					 ICE_DROP_PACKET); -			ice_cfg_sw_lldp(vsi, true, true); -		} +	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { +		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, +				 ICE_DROP_PACKET); +		ice_cfg_sw_lldp(vsi, true, true); +	}  	if (!vsi->agg_node)  		ice_set_agg_vsi(vsi); +  	return vsi; -unroll_clear_rings: -	ice_vsi_clear_rings(vsi); -unroll_vector_base: -	/* reclaim SW interrupts back to the common pool */ -	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -	pf->num_avail_sw_msix += vsi->num_q_vectors; -unroll_alloc_q_vector: -	ice_vsi_free_q_vectors(vsi); -unroll_vsi_init: -	ice_vsi_free_stats(vsi); -	ice_vsi_delete(vsi); -unroll_get_qs: -	ice_vsi_put_qs(vsi); -unroll_vsi_alloc: -	if (vsi_type == ICE_VSI_VF) +err_vsi_cfg: +	if (params->type == ICE_VSI_VF)  		ice_enable_lag(pf->lag); -	ice_vsi_clear(vsi); +	ice_vsi_free(vsi);  	return NULL;  } @@ -3121,37 +3254,6 @@ void ice_napi_del(struct ice_vsi *vsi)  }  /** - * ice_free_vf_ctrl_res - Free the VF control VSI resource - * @pf: pointer to PF structure - * @vsi: the VSI to free resources for - * - * Check if the VF control VSI resource is still in use. If no VF is using it - * any more, release the VSI resource. Otherwise, leave it to be cleaned up - * once no other VF uses it. 
- */ -static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi) -{ -	struct ice_vf *vf; -	unsigned int bkt; - -	rcu_read_lock(); -	ice_for_each_vf_rcu(pf, bkt, vf) { -		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { -			rcu_read_unlock(); -			return; -		} -	} -	rcu_read_unlock(); - -	/* No other VFs left that have control VSI. It is now safe to reclaim -	 * SW interrupts back to the common pool. -	 */ -	ice_free_res(pf->irq_tracker, vsi->base_vector, -		     ICE_RES_VF_CTRL_VEC_ID); -	pf->num_avail_sw_msix += vsi->num_q_vectors; -} - -/**   * ice_vsi_release - Delete a VSI and free its resources   * @vsi: the VSI being removed   * @@ -3160,7 +3262,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi)  int ice_vsi_release(struct ice_vsi *vsi)  {  	struct ice_pf *pf; -	int err;  	if (!vsi->back)  		return -ENODEV; @@ -3178,50 +3279,14 @@ int ice_vsi_release(struct ice_vsi *vsi)  		clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);  	} +	if (vsi->type == ICE_VSI_PF) +		ice_devlink_destroy_pf_port(pf); +  	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))  		ice_rss_clean(vsi); -	/* Disable VSI and free resources */ -	if (vsi->type != ICE_VSI_LB) -		ice_vsi_dis_irq(vsi);  	ice_vsi_close(vsi); - -	/* SR-IOV determines needed MSIX resources all at once instead of per -	 * VSI since when VFs are spawned we know how many VFs there are and how -	 * many interrupts each VF needs. SR-IOV MSIX resources are also -	 * cleared in the same manner. 
-	 */ -	if (vsi->type == ICE_VSI_CTRL && vsi->vf) { -		ice_free_vf_ctrl_res(pf, vsi); -	} else if (vsi->type != ICE_VSI_VF) { -		/* reclaim SW interrupts back to the common pool */ -		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -		pf->num_avail_sw_msix += vsi->num_q_vectors; -	} - -	if (!ice_is_safe_mode(pf)) { -		if (vsi->type == ICE_VSI_PF) { -			ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, -					    ICE_DROP_PACKET); -			ice_cfg_sw_lldp(vsi, true, false); -			/* The Rx rule will only exist to remove if the LLDP FW -			 * engine is currently stopped -			 */ -			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) -				ice_cfg_sw_lldp(vsi, false, false); -		} -	} - -	if (ice_is_vsi_dflt_vsi(vsi)) -		ice_clear_dflt_vsi(vsi); -	ice_fltr_remove_all(vsi); -	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); -	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); -	if (err) -		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", -			vsi->vsi_num, err); -	ice_vsi_delete(vsi); -	ice_vsi_free_q_vectors(vsi); +	ice_vsi_decfg(vsi);  	if (vsi->netdev) {  		if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { @@ -3235,22 +3300,12 @@ int ice_vsi_release(struct ice_vsi *vsi)  		}  	} -	if (vsi->type == ICE_VSI_PF) -		ice_devlink_destroy_pf_port(pf); - -	if (vsi->type == ICE_VSI_VF && -	    vsi->agg_node && vsi->agg_node->valid) -		vsi->agg_node->num_vsis--; -	ice_vsi_clear_rings(vsi); -	ice_vsi_free_stats(vsi); -	ice_vsi_put_qs(vsi); -  	/* retain SW VSI data structure since it is needed to unregister and  	 * free VSI netdev when PF is not in reset recovery pending state,\  	 * for ex: during rmmod.  	 
*/  	if (!ice_is_reset_in_progress(pf->state)) -		ice_vsi_clear(vsi); +		ice_vsi_delete(vsi);  	return 0;  } @@ -3375,7 +3430,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,   * @prev_txq: Number of Tx rings before ring reallocation   * @prev_rxq: Number of Rx rings before ring reallocation   */ -static int +static void  ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)  {  	struct ice_vsi_stats *vsi_stat; @@ -3383,9 +3438,9 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)  	int i;  	if (!prev_txq || !prev_rxq) -		return 0; +		return;  	if (vsi->type == ICE_VSI_CHNL) -		return 0; +		return;  	vsi_stat = pf->vsi_stats[vsi->idx]; @@ -3406,36 +3461,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)  			}  		}  	} - -	return 0;  }  /**   * ice_vsi_rebuild - Rebuild VSI after reset   * @vsi: VSI to be rebuild - * @init_vsi: is this an initialization or a reconfigure of the VSI + * @vsi_flags: flags used for VSI rebuild flow + * + * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or + * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.   
*   * Returns 0 on success and negative value on failure   */ -int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)  { -	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; +	struct ice_vsi_cfg_params params = {};  	struct ice_coalesce_stored *coalesce; -	int ret, i, prev_txq, prev_rxq; +	int ret, prev_txq, prev_rxq;  	int prev_num_q_vectors = 0; -	enum ice_vsi_type vtype;  	struct ice_pf *pf;  	if (!vsi)  		return -EINVAL; +	params = ice_vsi_to_params(vsi); +	params.flags = vsi_flags; +  	pf = vsi->back; -	vtype = vsi->type; -	if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf)) +	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))  		return -EINVAL; -	ice_vsi_init_vlan_ops(vsi); -  	coalesce = kcalloc(vsi->num_q_vectors,  			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);  	if (!coalesce) @@ -3446,188 +3501,32 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)  	prev_txq = vsi->num_txq;  	prev_rxq = vsi->num_rxq; -	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); -	ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); +	ice_vsi_decfg(vsi); +	ret = ice_vsi_cfg_def(vsi, ¶ms);  	if (ret) -		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", -			vsi->vsi_num, ret); -	ice_vsi_free_q_vectors(vsi); - -	/* SR-IOV determines needed MSIX resources all at once instead of per -	 * VSI since when VFs are spawned we know how many VFs there are and how -	 * many interrupts each VF needs. SR-IOV MSIX resources are also -	 * cleared in the same manner. 
-	 */ -	if (vtype != ICE_VSI_VF) { -		/* reclaim SW interrupts back to the common pool */ -		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -		pf->num_avail_sw_msix += vsi->num_q_vectors; -		vsi->base_vector = 0; -	} - -	if (ice_is_xdp_ena_vsi(vsi)) -		/* return value check can be skipped here, it always returns -		 * 0 if reset is in progress -		 */ -		ice_destroy_xdp_rings(vsi); -	ice_vsi_put_qs(vsi); -	ice_vsi_clear_rings(vsi); -	ice_vsi_free_arrays(vsi); -	if (vtype == ICE_VSI_VF) -		ice_vsi_set_num_qs(vsi, vsi->vf); -	else -		ice_vsi_set_num_qs(vsi, NULL); - -	ret = ice_vsi_alloc_arrays(vsi); -	if (ret < 0) -		goto err_vsi; - -	ice_vsi_get_qs(vsi); - -	ice_alloc_fd_res(vsi); -	ice_vsi_set_tc_cfg(vsi); - -	/* Initialize VSI struct elements and create VSI in FW */ -	ret = ice_vsi_init(vsi, init_vsi); -	if (ret < 0) -		goto err_vsi; - -	switch (vtype) { -	case ICE_VSI_CTRL: -	case ICE_VSI_SWITCHDEV_CTRL: -	case ICE_VSI_PF: -		ret = ice_vsi_alloc_q_vectors(vsi); -		if (ret) -			goto err_rings; - -		ret = ice_vsi_setup_vector_base(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_set_q_vectors_reg_idx(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_rings(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_ring_stats(vsi); -		if (ret) -			goto err_vectors; - -		ice_vsi_map_rings_to_vectors(vsi); - -		vsi->stat_offsets_loaded = false; -		if (ice_is_xdp_ena_vsi(vsi)) { -			ret = ice_vsi_determine_xdp_res(vsi); -			if (ret) -				goto err_vectors; -			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); -			if (ret) -				goto err_vectors; -		} -		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */ -		if (vtype != ICE_VSI_CTRL) -			/* Do not exit if configuring RSS had an issue, at -			 * least receive traffic on first queue. 
Hence no -			 * need to capture return value -			 */ -			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) -				ice_vsi_cfg_rss_lut_key(vsi); - -		/* disable or enable CRC stripping */ -		if (vsi->netdev) -			ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & -					      NETIF_F_RXFCS)); - -		break; -	case ICE_VSI_VF: -		ret = ice_vsi_alloc_q_vectors(vsi); -		if (ret) -			goto err_rings; - -		ret = ice_vsi_set_q_vectors_reg_idx(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_rings(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_ring_stats(vsi); -		if (ret) -			goto err_vectors; - -		vsi->stat_offsets_loaded = false; -		break; -	case ICE_VSI_CHNL: -		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { -			ice_vsi_cfg_rss_lut_key(vsi); -			ice_vsi_set_rss_flow_fld(vsi); -		} -		break; -	default: -		break; -	} - -	/* configure VSI nodes based on number of queues and TC's */ -	for (i = 0; i < vsi->tc_cfg.numtc; i++) { -		/* configure VSI nodes based on number of queues and TC's. -		 * ADQ creates VSIs for each TC/Channel but doesn't -		 * allocate queues instead it reconfigures the PF queues -		 * as per the TC command. So max_txqs should point to the -		 * PF Tx queues. 
-		 */ -		if (vtype == ICE_VSI_CHNL) -			max_txqs[i] = pf->num_lan_tx; -		else -			max_txqs[i] = vsi->alloc_txq; - -		if (ice_is_xdp_ena_vsi(vsi)) -			max_txqs[i] += vsi->num_xdp_txq; -	} - -	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) -		/* If MQPRIO is set, means channel code path, hence for main -		 * VSI's, use TC as 1 -		 */ -		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); -	else -		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, -				      vsi->tc_cfg.ena_tc, max_txqs); +		goto err_vsi_cfg; +	ret = ice_vsi_cfg_tc_lan(pf, vsi);  	if (ret) { -		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", -			vsi->vsi_num, ret); -		if (init_vsi) { +		if (vsi_flags & ICE_VSI_FLAG_INIT) {  			ret = -EIO; -			goto err_vectors; -		} else { -			return ice_schedule_reset(pf, ICE_RESET_PFR); +			goto err_vsi_cfg_tc_lan;  		} + +		kfree(coalesce); +		return ice_schedule_reset(pf, ICE_RESET_PFR);  	} -	if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq)) -		goto err_vectors; +	ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);  	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);  	kfree(coalesce);  	return 0; -err_vectors: -	ice_vsi_free_q_vectors(vsi); -err_rings: -	if (vsi->netdev) { -		vsi->current_netdev_flags = 0; -		unregister_netdev(vsi->netdev); -		free_netdev(vsi->netdev); -		vsi->netdev = NULL; -	} -err_vsi: -	ice_vsi_clear(vsi); -	set_bit(ICE_RESET_FAILED, pf->state); +err_vsi_cfg_tc_lan: +	ice_vsi_decfg(vsi); +err_vsi_cfg:  	kfree(coalesce);  	return ret;  } @@ -3863,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)  	dev = ice_pf_to_dev(pf);  	if (vsi->tc_cfg.ena_tc == ena_tc &&  	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) -		return ret; +		return 0;  	ice_for_each_traffic_class(i) {  		/* build bitmap of enabled TCs */  |