Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
 drivers/net/ethernet/intel/ice/ice_main.c | 138
 1 file changed, 87 insertions(+), 51 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f60c022f7960..55a42aad92a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -805,6 +805,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 	}
 
 	switch (vsi->port_info->phy.link_info.link_speed) {
+	case ICE_AQ_LINK_SPEED_200GB:
+		speed = "200 G";
+		break;
 	case ICE_AQ_LINK_SPEED_100GB:
 		speed = "100 G";
 		break;
@@ -2707,17 +2710,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
 		bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
+
+	/* follow the logic from ice_vsi_map_rings_to_vectors */
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		int xdp_rings_per_v, q_id, q_base;
+
+		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+					       vsi->num_q_vectors - v_idx);
+		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+			xdp_ring->q_vector = q_vector;
+			xdp_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = xdp_ring;
+		}
+		xdp_rings_rem -= xdp_rings_per_v;
+	}
+
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
+	}
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
 	struct ice_pf *pf = vsi->back;
 	struct ice_qs_cfg xdp_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2788,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 		.mapping_mode = ICE_VSI_MAP_CONTIG
 	};
 	struct device *dev;
-	int i, v_idx;
-	int status;
+	int status, i;
 
 	dev = ice_pf_to_dev(pf);
 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2807,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 	if (ice_xdp_alloc_setup_rings(vsi))
 		goto clear_xdp_rings;
 
-	/* follow the logic from ice_vsi_map_rings_to_vectors */
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		int xdp_rings_per_v, q_id, q_base;
-
-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-					       vsi->num_q_vectors - v_idx);
-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-			xdp_ring->q_vector = q_vector;
-			xdp_ring->next = q_vector->tx.tx_ring;
-			q_vector->tx.tx_ring = xdp_ring;
-		}
-		xdp_rings_rem -= xdp_rings_per_v;
-	}
-
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
-	}
-
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
 	 */
-	if (ice_is_reset_in_progress(pf->state))
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
+	ice_map_xdp_rings(vsi);
+
 	/* tell the Tx scheduler that right now we have
 	 * additional queues
 	 */
@@ -2842,22 +2865,21 @@ err_map_xdp:
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
 	int i, v_idx;
 
 	/* q_vectors are freed in reset path so there's no point in detaching
-	 * rings; in case of rebuild being triggered not from reset bits
-	 * in pf->state won't be set, so additionally check first q_vector
-	 * against NULL
+	 * rings
 	 */
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;
 
 	ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2920,7 @@ free_qmap:
 	if (static_key_enabled(&ice_xdp_locking_key))
 		static_branch_dec(&ice_xdp_locking_key);
 
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
 	ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3031,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
 		} else {
-			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+							     ICE_XDP_CFG_FULL);
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}
@@ -3020,7 +3043,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
 		xdp_features_clear_redirect_target(vsi->netdev);
-		xdp_ring_err = ice_destroy_xdp_rings(vsi);
+		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
 		/* reallocate Rx queues that were used for zero-copy */
@@ -4116,7 +4139,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
 {
 	struct ice_pf *pf = vsi->back;
-	int err = 0, timeout = 50;
+	int i, err = 0, timeout = 50;
 
 	if (!new_rx && !new_tx)
 		return -EINVAL;
@@ -4142,6 +4165,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
 	ice_vsi_close(vsi);
 
 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+
+	ice_for_each_traffic_class(i) {
+		if (vsi->tc_cfg.ena_tc & BIT(i))
+			netdev_set_tc_queue(vsi->netdev,
+					    vsi->tc_cfg.tc_info[i].netdev_tc,
+					    vsi->tc_cfg.tc_info[i].qcount_tx,
+					    vsi->tc_cfg.tc_info[i].qoffset);
+	}
 	ice_pf_dcb_recfg(pf, locked);
 	ice_vsi_open(vsi);
 done:
@@ -5544,7 +5575,7 @@ static int ice_suspend(struct device *dev)
 	 */
 	disabled = ice_service_task_stop(pf);
 
-	ice_unplug_aux_dev(pf);
+	ice_deinit_rdma(pf);
 
 	/* Already suspended?, then there is nothing to do */
 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5624,6 +5655,11 @@ static int ice_resume(struct device *dev)
 	if (ret)
 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
 
+	ret = ice_init_rdma(pf);
+	if (ret)
+		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+			ret);
+
 	clear_bit(ICE_DOWN, pf->state);
 	/* Now perform PF reset and rebuild */
 	reset_type = ICE_RESET_PFR;
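
The loop in ice_map_xdp_rings() distributes vsi->num_xdp_txq XDP Tx rings across vsi->num_q_vectors interrupt vectors as evenly as possible: each vector takes DIV_ROUND_UP(remaining rings, remaining vectors) rings, so earlier vectors get at most one ring more than later ones and every ring is assigned exactly once. A minimal userspace sketch of the same arithmetic, using hypothetical counts (10 rings, 4 vectors) purely for illustration:

#include <stdio.h>

/* same rounding-up division the kernel macro performs */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10;	/* hypothetical ring count */
	int num_q_vectors = 4;	/* hypothetical vector count */
	int xdp_rings_rem = num_xdp_txq;

	for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		/* spread the remaining rings over the remaining vectors */
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d takes rings %d..%d\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;
}

With these counts, vectors 0..3 take rings 0..2, 3..5, 6..7 and 8..9 respectively: per-vector counts never differ by more than one.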
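ice_xdp_ring_from_qid() covers two cases. When each Rx queue's vector carries its own XDP ring, the ring is found by walking the vector's Tx ring list. When there are fewer XDP rings than Rx queues, ice_xdp_locking_key is enabled and rings are shared by simple modulo, which is why those rings need their own locking. A toy illustration of the shared case, again with hypothetical counts (16 Rx queues, 10 XDP rings):

#include <stdio.h>

int main(void)
{
	int num_rxq = 16, num_xdp_txq = 10;	/* hypothetical counts */

	/* mirrors vsi->xdp_rings[qid % vsi->num_xdp_txq] */
	for (int qid = 0; qid < num_rxq; qid++)
		printf("rx queue %2d -> xdp ring %d\n",
		       qid, qid % num_xdp_txq);
	return 0;
}

Here Rx queues 10..15 fall back onto rings 0..5, so those six rings each serve two queues and can be hit from two NAPI contexts at once.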
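Both ice_prepare_xdp_rings() and ice_destroy_xdp_rings() now take an enum ice_xdp_cfg argument instead of inferring the caller's intent from ice_is_reset_in_progress(pf->state) and a NULL check on vsi->q_vectors[0]. The enum itself is defined outside this file and is not visible in this diff; a sketch of its presumed shape:

/* Presumed definition; the real one lives outside ice_main.c and
 * is not part of this diff.
 */
enum ice_xdp_cfg {
	ICE_XDP_CFG_FULL,	/* full setup/teardown from .ndo_bpf() */
	ICE_XDP_CFG_PART,	/* partial config kept across a VSI rebuild */
};

Passing the intent explicitly lets the rebuild path (ICE_XDP_CFG_PART) skip detaching rings and updating the Tx scheduler, while the .ndo_bpf() path (ICE_XDP_CFG_FULL) performs the complete sequence.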