diff options
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c | 63 | 
1 file changed, 41 insertions, 22 deletions
| diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index bc3ba19dc88f..3e38695f1c9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -3,8 +3,9 @@  /* The driver transmit and receive code */ -#include <linux/prefetch.h>  #include <linux/mm.h> +#include <linux/netdevice.h> +#include <linux/prefetch.h>  #include <linux/bpf_trace.h>  #include <net/dsfield.h>  #include <net/xdp.h> @@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  	struct ice_tx_desc *tx_desc;  	struct ice_tx_buf *tx_buf; +	/* get the bql data ready */ +	if (!ice_ring_is_xdp(tx_ring)) +		netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); +  	tx_buf = &tx_ring->tx_buf[i];  	tx_desc = ICE_TX_DESC(tx_ring, i);  	i -= tx_ring->count; @@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  		if (!eop_desc)  			break; +		/* follow the guidelines of other drivers */ +		prefetchw(&tx_buf->skb->users); +  		smp_rmb();	/* prevent any other reads prior to eop_desc */  		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); @@ -304,8 +312,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); -	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, -				  total_bytes); +	if (ice_ring_is_xdp(tx_ring)) +		return !!budget; + +	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);  #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))  	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && @@ -314,11 +324,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  		 * sees the new next_to_clean.  		 
*/  		smp_mb(); -		if (__netif_subqueue_stopped(tx_ring->netdev, -					     tx_ring->q_index) && +		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&  		    !test_bit(ICE_VSI_DOWN, vsi->state)) { -			netif_wake_subqueue(tx_ring->netdev, -					    tx_ring->q_index); +			netif_tx_wake_queue(txring_txq(tx_ring));  			++tx_ring->tx_stats.restart_q;  		}  	} @@ -419,7 +427,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  	}  rx_skip_free: -	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); +	if (rx_ring->xsk_pool) +		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); +	else +		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));  	/* Zero out the descriptor ring */  	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), @@ -446,8 +457,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)  		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))  			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);  	rx_ring->xdp_prog = NULL; -	devm_kfree(rx_ring->dev, rx_ring->rx_buf); -	rx_ring->rx_buf = NULL; +	if (rx_ring->xsk_pool) { +		kfree(rx_ring->xdp_buf); +		rx_ring->xdp_buf = NULL; +	} else { +		kfree(rx_ring->rx_buf); +		rx_ring->rx_buf = NULL; +	}  	if (rx_ring->desc) {  		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), @@ -475,8 +491,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)  	/* warn if we are about to overwrite the pointer */  	WARN_ON(rx_ring->rx_buf);  	rx_ring->rx_buf = -		devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count, -			     GFP_KERNEL); +		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);  	if (!rx_ring->rx_buf)  		return -ENOMEM; @@ -505,7 +520,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)  	return 0;  err: -	devm_kfree(dev, rx_ring->rx_buf); +	kfree(rx_ring->rx_buf);  	rx_ring->rx_buf = NULL;  	return -ENOMEM;  } @@ -561,7 +576,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,  			goto 
out_failure;  		return ICE_XDP_REDIR;  	default: -		bpf_warn_invalid_xdp_action(act); +		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);  		fallthrough;  	case XDP_ABORTED:  out_failure: @@ -933,7 +948,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,  	 */  	net_prefetch(xdp->data_meta);  	/* build an skb around the page buffer */ -	skb = build_skb(xdp->data_hard_start, truesize); +	skb = napi_build_skb(xdp->data_hard_start, truesize);  	if (unlikely(!skb))  		return NULL; @@ -1517,7 +1532,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)   */  static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)  { -	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); +	netif_tx_stop_queue(txring_txq(tx_ring));  	/* Memory barrier before checking head and tail */  	smp_mb(); @@ -1525,8 +1540,8 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)  	if (likely(ICE_DESC_UNUSED(tx_ring) < size))  		return -EBUSY; -	/* A reprieve! - use start_subqueue because it doesn't call schedule */ -	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); +	/* A reprieve! 
- use start_queue because it doesn't call schedule */ +	netif_tx_start_queue(txring_txq(tx_ring));  	++tx_ring->tx_stats.restart_q;  	return 0;  } @@ -1568,6 +1583,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,  	struct sk_buff *skb;  	skb_frag_t *frag;  	dma_addr_t dma; +	bool kick;  	td_tag = off->td_l2tag1;  	td_cmd = off->td_cmd; @@ -1649,9 +1665,6 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,  		tx_buf = &tx_ring->tx_buf[i];  	} -	/* record bytecount for BQL */ -	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); -  	/* record SW timestamp if HW timestamp is not available */  	skb_tx_timestamp(first->skb); @@ -1680,7 +1693,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,  	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);  	/* notify HW of packet */ -	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) +	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, +				      netdev_xmit_more()); +	if (kick) +		/* notify HW of packet */  		writel(i, tx_ring->tail);  	return; @@ -2265,6 +2281,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)  		return NETDEV_TX_BUSY;  	} +	/* prefetch for bql data which is infrequently used */ +	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); +  	offload.tx_ring = tx_ring;  	/* record the location of the first descriptor for this packet */ |