diff options
Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
| -rw-r--r-- | drivers/net/ethernet/intel/igc/igc_main.c | 163 | 
1 file changed, 144 insertions(+), 19 deletions(-)
| diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1c4676882082..019ce91c45aa 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)  	/* reset BQL for queue */  	netdev_tx_reset_queue(txring_txq(tx_ring)); +	/* Zero out the buffer ring */ +	memset(tx_ring->tx_buffer_info, 0, +	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + +	/* Zero out the descriptor ring */ +	memset(tx_ring->desc, 0, tx_ring->size); +  	/* reset next_to_use and next_to_clean */  	tx_ring->next_to_use = 0;  	tx_ring->next_to_clean = 0; @@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)   */  void igc_free_tx_resources(struct igc_ring *tx_ring)  { -	igc_clean_tx_ring(tx_ring); +	igc_disable_tx_ring(tx_ring);  	vfree(tx_ring->tx_buffer_info);  	tx_ring->tx_buffer_info = NULL; @@ -1578,14 +1585,16 @@ done:  		}  	} -	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { +	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && +		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {  		/* FIXME: add support for retrieving timestamps from  		 * the other timer registers before skipping the  		 * timestamping request.  		 
*/ -		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && -		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, -					   &adapter->state)) { +		unsigned long flags; + +		spin_lock_irqsave(&adapter->ptp_tx_lock, flags); +		if (!adapter->ptp_tx_skb) {  			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;  			tx_flags |= IGC_TX_FLAGS_TSTAMP; @@ -1594,6 +1603,8 @@ done:  		} else {  			adapter->tx_hwtstamp_skipped++;  		} + +		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);  	}  	if (skb_vlan_tag_present(skb)) { @@ -1690,14 +1701,36 @@ static void igc_rx_checksum(struct igc_ring *ring,  		   le32_to_cpu(rx_desc->wb.upper.status_error));  } +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { +	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2, +	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4, +	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3, +	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4, +	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3, +	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3, +	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, +	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4, +	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4, +	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, +	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */ +	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */ +	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons       */ +	[13] = PKT_HASH_TYPE_NONE, +	[14] = PKT_HASH_TYPE_NONE, +	[15] = PKT_HASH_TYPE_NONE, +}; +  static inline void igc_rx_hash(struct igc_ring *ring,  			       union igc_adv_rx_desc *rx_desc,  			       struct sk_buff *skb)  { -	if (ring->netdev->features & NETIF_F_RXHASH) -		skb_set_hash(skb, -			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), -			     PKT_HASH_TYPE_L3); +	if (ring->netdev->features & NETIF_F_RXHASH) { +		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +		u32 
rss_type = igc_rss_type(rx_desc); + +		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); +	}  }  static void igc_rx_vlan(struct igc_ring *rx_ring, @@ -2214,6 +2247,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)  	if (!count)  		return ok; +	XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); +  	desc = IGC_RX_DESC(ring, i);  	bi = &ring->rx_buffer_info[i];  	i -= ring->count; @@ -2387,6 +2422,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)  	nq = txring_txq(ring);  	__netif_tx_lock(nq, cpu); +	/* Avoid transmit queue timeout since we share it with the slow path */ +	txq_trans_cond_update(nq);  	res = igc_xdp_init_tx_descriptor(ring, xdpf);  	__netif_tx_unlock(nq);  	return res; @@ -2498,8 +2535,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  		union igc_adv_rx_desc *rx_desc;  		struct igc_rx_buffer *rx_buffer;  		unsigned int size, truesize; +		struct igc_xdp_buff ctx;  		ktime_t timestamp = 0; -		struct xdp_buff xdp;  		int pkt_offset = 0;  		void *pktbuf; @@ -2528,18 +2565,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {  			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,  							pktbuf); +			ctx.rx_ts = timestamp;  			pkt_offset = IGC_TS_HDR_LEN;  			size -= IGC_TS_HDR_LEN;  		}  		if (!skb) { -			xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); -			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), +			xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); +			xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),  					 igc_rx_offset(rx_ring) + pkt_offset,  					 size, true); -			xdp_buff_clear_frags_flag(&xdp); +			xdp_buff_clear_frags_flag(&ctx.xdp); +			ctx.rx_desc = rx_desc; -			skb = igc_xdp_run_prog(adapter, &xdp); +			skb = igc_xdp_run_prog(adapter, &ctx.xdp);  		}  		if (IS_ERR(skb)) { @@ -2561,9 +2600,9 @@ static int igc_clean_rx_irq(struct 
igc_q_vector *q_vector, const int budget)  		} else if (skb)  			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);  		else if (ring_uses_build_skb(rx_ring)) -			skb = igc_build_skb(rx_ring, rx_buffer, &xdp); +			skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);  		else -			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, +			skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,  						timestamp);  		/* exit if we failed to retrieve a buffer */ @@ -2664,6 +2703,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,  	napi_gro_receive(&q_vector->napi, skb);  } +static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ +	/* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The +	 * igc_xdp_buff shares its layout with xdp_buff_xsk and private +	 * igc_xdp_buff fields fall into xdp_buff_xsk->cb +	 */ +       return (struct igc_xdp_buff *)xdp; +} +  static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)  {  	struct igc_adapter *adapter = q_vector->adapter; @@ -2682,6 +2730,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)  	while (likely(total_packets < budget)) {  		union igc_adv_rx_desc *desc;  		struct igc_rx_buffer *bi; +		struct igc_xdp_buff *ctx;  		ktime_t timestamp = 0;  		unsigned int size;  		int res; @@ -2699,9 +2748,13 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)  		bi = &ring->rx_buffer_info[ntc]; +		ctx = xsk_buff_to_igc_ctx(bi->xdp); +		ctx->rx_desc = desc; +  		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {  			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,  							bi->xdp->data); +			ctx->rx_ts = timestamp;  			bi->xdp->data += IGC_TS_HDR_LEN; @@ -2789,6 +2842,9 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)  	__netif_tx_lock(nq, cpu); +	/* Avoid transmit queue timeout since we share it with the slow path */ +	txq_trans_cond_update(nq); +  	budget = igc_desc_unused(ring);  	while 
(xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { @@ -5212,7 +5268,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)  	if (tsicr & IGC_TSICR_TXTS) {  		/* retrieve hardware timestamp */ -		schedule_work(&adapter->ptp_tx_work); +		igc_ptp_tx_tstamp_event(adapter);  		ack |= IGC_TSICR_TXTS;  	} @@ -6068,9 +6124,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,  	size_t n;  	int i; -	adapter->qbv_enable = qopt->enable; +	switch (qopt->cmd) { +	case TAPRIO_CMD_REPLACE: +		adapter->qbv_enable = true; +		break; +	case TAPRIO_CMD_DESTROY: +		adapter->qbv_enable = false; +		break; +	default: +		return -EOPNOTSUPP; +	} -	if (!qopt->enable) +	if (!adapter->qbv_enable)  		return igc_tsn_clear_schedule(adapter);  	if (qopt->base_time < 0) @@ -6314,6 +6379,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,  	__netif_tx_lock(nq, cpu); +	/* Avoid transmit queue timeout since we share it with the slow path */ +	txq_trans_cond_update(nq); +  	drops = 0;  	for (i = 0; i < num_frames; i++) {  		int err; @@ -6454,6 +6522,58 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)  	return value;  } +/* Mapping HW RSS Type to enum xdp_rss_hash_type */ +static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { +	[IGC_RSS_TYPE_NO_HASH]		= XDP_RSS_TYPE_L2, +	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_TCP, +	[IGC_RSS_TYPE_HASH_IPV4]	= XDP_RSS_TYPE_L3_IPV4, +	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_TCP, +	[IGC_RSS_TYPE_HASH_IPV6_EX]	= XDP_RSS_TYPE_L3_IPV6_EX, +	[IGC_RSS_TYPE_HASH_IPV6]	= XDP_RSS_TYPE_L3_IPV6, +	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, +	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_UDP, +	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_UDP, +	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, +	[10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */ +	[11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask   */ +	[12] = XDP_RSS_TYPE_NONE, 
/* to handle future HW revisons       */ +	[13] = XDP_RSS_TYPE_NONE, +	[14] = XDP_RSS_TYPE_NONE, +	[15] = XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, +			   enum xdp_rss_hash_type *rss_type) +{ +	const struct igc_xdp_buff *ctx = (void *)_ctx; + +	if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) +		return -ENODATA; + +	*hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); +	*rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; + +	return 0; +} + +static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) +{ +	const struct igc_xdp_buff *ctx = (void *)_ctx; + +	if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { +		*timestamp = ctx->rx_ts; + +		return 0; +	} + +	return -ENODATA; +} + +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { +	.xmo_rx_hash			= igc_xdp_rx_hash, +	.xmo_rx_timestamp		= igc_xdp_rx_timestamp, +}; +  /**   * igc_probe - Device Initialization Routine   * @pdev: PCI device information struct @@ -6527,6 +6647,7 @@ static int igc_probe(struct pci_dev *pdev,  	hw->hw_addr = adapter->io_addr;  	netdev->netdev_ops = &igc_netdev_ops; +	netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;  	igc_ethtool_set_ops(netdev);  	netdev->watchdog_timeo = 5 * HZ; @@ -6554,6 +6675,7 @@ static int igc_probe(struct pci_dev *pdev,  	netdev->features |= NETIF_F_TSO;  	netdev->features |= NETIF_F_TSO6;  	netdev->features |= NETIF_F_TSO_ECN; +	netdev->features |= NETIF_F_RXHASH;  	netdev->features |= NETIF_F_RXCSUM;  	netdev->features |= NETIF_F_HW_CSUM;  	netdev->features |= NETIF_F_SCTP_CRC; @@ -6723,6 +6845,9 @@ static void igc_remove(struct pci_dev *pdev)  	igc_ptp_stop(adapter); +	pci_disable_ptm(pdev); +	pci_clear_master(pdev); +  	set_bit(__IGC_DOWN, &adapter->state);  	del_timer_sync(&adapter->watchdog_timer); |