Diffstat (limited to 'drivers/net/ethernet/intel/igc')
 drivers/net/ethernet/intel/igc/igc.h         |  34
 drivers/net/ethernet/intel/igc/igc_base.h    |   2
 drivers/net/ethernet/intel/igc/igc_defines.h |   9
 drivers/net/ethernet/intel/igc/igc_dump.c    |   2
 drivers/net/ethernet/intel/igc/igc_ethtool.c |  41
 drivers/net/ethernet/intel/igc/igc_main.c    | 731
 drivers/net/ethernet/intel/igc/igc_regs.h    |   2
 drivers/net/ethernet/intel/igc/igc_xdp.c     | 109
 drivers/net/ethernet/intel/igc/igc_xdp.h     |   8
 9 files changed, 776 insertions(+), 162 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 25871351730b..9e0bbb2e55e3 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -118,6 +118,7 @@ struct igc_ring {
 	};
 
 	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 } ____cacheline_internodealigned_in_smp;
 
 /* Board specific private data structure */
@@ -255,6 +256,11 @@ bool igc_has_link(struct igc_adapter *adapter);
 void igc_reset(struct igc_adapter *adapter);
 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
 void igc_update_stats(struct igc_adapter *adapter);
+void igc_disable_rx_ring(struct igc_ring *ring);
+void igc_enable_rx_ring(struct igc_ring *ring);
+void igc_disable_tx_ring(struct igc_ring *ring);
+void igc_enable_tx_ring(struct igc_ring *ring);
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 
 /* igc_dump declarations */
 void igc_rings_dump(struct igc_adapter *adapter);
@@ -366,6 +372,7 @@ extern char igc_driver_name[];
 
 /* VLAN info */
 #define IGC_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IGC_TX_FLAGS_VLAN_SHIFT	16
 
 /* igc_test_staterr - tests bits within Rx descriptor status and error fields */
 static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
@@ -390,8 +397,6 @@ enum igc_tx_flags {
 	/* olinfo flags */
 	IGC_TX_FLAGS_IPV4	= 0x10,
 	IGC_TX_FLAGS_CSUM	= 0x20,
-
-	IGC_TX_FLAGS_XDP	= 0x100,
 };
 
 enum igc_boards {
@@ -408,12 +413,19 @@ enum igc_boards {
 #define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
 #define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
 
+enum igc_tx_buffer_type {
+	IGC_TX_BUFFER_TYPE_SKB,
+	IGC_TX_BUFFER_TYPE_XDP,
+	IGC_TX_BUFFER_TYPE_XSK,
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igc_tx_buffer {
 	union igc_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
+	enum igc_tx_buffer_type type;
 	union {
 		struct sk_buff *skb;
 		struct xdp_frame *xdpf;
@@ -428,14 +440,19 @@ struct igc_tx_buffer {
 };
 
 struct igc_rx_buffer {
-	dma_addr_t dma;
-	struct page *page;
+	union {
+		struct {
+			dma_addr_t dma;
+			struct page *page;
 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
+			__u32 page_offset;
 #else
-	__u16 page_offset;
+			__u16 page_offset;
 #endif
-	__u16 pagecnt_bias;
+			__u16 pagecnt_bias;
+		};
+		struct xdp_buff *xdp;
+	};
 };
 
 struct igc_q_vector {
@@ -521,7 +538,8 @@ enum igc_ring_flags_t {
 	IGC_RING_FLAG_RX_SCTP_CSUM,
 	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGC_RING_FLAG_TX_CTX_IDX,
-	IGC_RING_FLAG_TX_DETECT_HANG
+	IGC_RING_FLAG_TX_DETECT_HANG,
+	IGC_RING_FLAG_AF_XDP_ZC,
 };
 
 #define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ea627ce52525..ce530f5fd7bd 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -78,9 +78,11 @@ union igc_adv_rx_desc {
 
 /* Additional Transmit Descriptor Control definitions */
 #define IGC_TXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Tx Queue */
+#define IGC_TXDCTL_SWFLUSH	0x04000000 /* Transmit Software Flush */
 
 /* Additional Receive Descriptor Control definitions */
 #define IGC_RXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Rx Queue */
+#define IGC_RXDCTL_SWFLUSH		0x04000000 /* Receive Software Flush */
 
 /* SRRCTL bit definitions */
 #define IGC_SRRCTL_BSIZEPKT_SHIFT		10 /* Shift _right_ */
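The igc.h hunks above replace the implicit IGC_TX_FLAGS_XDP flag with an explicit per-buffer type tag, and turn struct igc_rx_buffer into a union so one bookkeeping array serves both the page-shared and the XSK zero-copy paths. For illustration only, a minimal stand-alone sketch of the tagged-union pattern; all names here are invented for the example, not taken from the driver:

    #include <stdio.h>
    #include <stdlib.h>

    enum buf_type { BUF_SKB, BUF_XDP, BUF_XSK };

    struct tx_buf {
            enum buf_type type;     /* tag: tells cleanup how to release */
            union {                 /* only one owner at a time */
                    void *skb;
                    void *xdp_frame;
            };
    };

    /* each buffer type has a different release path, selected by the tag */
    static void release_buf(struct tx_buf *b)
    {
            switch (b->type) {
            case BUF_SKB: free(b->skb); break;       /* dev_kfree_skb_any() in the driver */
            case BUF_XDP: free(b->xdp_frame); break; /* xdp_return_frame() in the driver */
            case BUF_XSK: break;                     /* XSK completions are counted and batched */
            }
    }

    int main(void)
    {
            struct tx_buf b = { .type = BUF_SKB, .skb = malloc(16) };

            release_buf(&b);
            printf("released a %d-tagged buffer\n", b.type);
            return 0;
    }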
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 0103dda32f39..c3a5a5518790 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -94,12 +94,13 @@
 #define IGC_CTRL_SLU		0x00000040  /* Set link up (Force Link) */
 #define IGC_CTRL_FRCSPD		0x00000800  /* Force Speed */
 #define IGC_CTRL_FRCDPX		0x00001000  /* Force Duplex */
+#define IGC_CTRL_VME		0x40000000  /* IEEE VLAN mode enable */
 
 #define IGC_CTRL_RFCE		0x08000000  /* Receive Flow Control enable */
 #define IGC_CTRL_TFCE		0x10000000  /* Transmit flow control enable */
 
-#define IGC_CTRL_SDP0_DIR 0x00400000	/* SDP0 Data direction */
-#define IGC_CTRL_SDP1_DIR 0x00800000	/* SDP1 Data direction */
+#define IGC_CTRL_SDP0_DIR	0x00400000  /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR	0x00800000  /* SDP1 Data direction */
 
 /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
 #define MAX_JUMBO_FRAME_SIZE	0x2600
@@ -128,7 +129,6 @@
 #define NWAY_LPAR_ASM_DIR	0x0800 /* LP Asymmetric Pause Direction bit */
 
 /* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE	0x0080 /* Advertise asymmetric pause bit */
 #define CR_1000T_HD_CAPS	0x0100 /* Advertise 1000T HD capability */
 #define CR_1000T_FD_CAPS	0x0200 /* Advertise 1000T FD capability  */
 
@@ -323,6 +323,9 @@
 #define IGC_RXD_STAT_IXSM	0x04	/* Ignore checksum */
 #define IGC_RXD_STAT_UDPCS	0x10	/* UDP xsum calculated */
 #define IGC_RXD_STAT_TCPCS	0x20	/* TCP xsum calculated */
+#define IGC_RXD_STAT_VP		0x08	/* IEEE VLAN Packet */
+
+#define IGC_RXDEXT_STATERR_LB	0x00040000
 
 /* Advanced Receive Descriptor bit definitions */
 #define IGC_RXDADV_STAT_TSIP	0x08000 /* timestamp in packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 495bed47ed0a..c09c95cc5f70 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -112,7 +112,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
 void igc_rings_dump(struct igc_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	struct my_u0 { u64 a; u64 b; } *u0;
+	struct my_u0 { __le64 a; __le64 b; } *u0;
 	union igc_adv_tx_desc *tx_desc;
 	union igc_adv_rx_desc *rx_desc;
 	struct igc_ring *tx_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 9722449d7633..fa4171860623 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -554,7 +554,7 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
 	memcpy(ptr, bytes, eeprom->len);
 
 	for (i = 0; i < last_word - first_word + 1; i++)
-		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+		cpu_to_le16s(&eeprom_buff[i]);
 
 	ret_val = hw->nvm.ops.write(hw, first_word,
				    last_word - first_word + 1, eeprom_buff);
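The igc_ethtool.c hunk above swaps the self-assignment `eeprom_buff[i] = cpu_to_le16(eeprom_buff[i])` for the in-place `cpu_to_le16s()`, which byte-swaps the word where it sits and keeps the endianness annotations consistent. A stand-alone sketch of the same in-place conversion using the user-space <endian.h> helpers, purely for illustration:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* in-place convert, mirroring the kernel's cpu_to_le16s() helper */
    static void cpu_to_le16s_demo(uint16_t *p)
    {
            *p = htole16(*p);
    }

    int main(void)
    {
            uint16_t words[3] = { 0x1234, 0xabcd, 0x00ff };

            for (unsigned int i = 0; i < 3; i++)
                    cpu_to_le16s_demo(&words[i]);

            /* on a little-endian host this is a no-op; on big-endian it swaps */
            printf("%04x %04x %04x\n", words[0], words[1], words[2]);
            return 0;
    }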
@@ -765,35 +765,22 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
 		       IGC_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, igc_gstrings_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
-			memcpy(p, igc_gstrings_net_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
+		for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
+			ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
+		for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
+			ethtool_sprintf(&p,
+					igc_gstrings_net_stats[i].stat_string);
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			sprintf(p, "tx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_restart", i);
-			p += ETH_GSTRING_LEN;
+			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+			ethtool_sprintf(&p, "tx_queue_%u_restart", i);
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			sprintf(p, "rx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_drops", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_csum_err", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_alloc_failed", i);
-			p += ETH_GSTRING_LEN;
+			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+			ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+			ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+			ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
 		}
 		/* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
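The strings hunk replaces open-coded sprintf() calls plus manual pointer bumps with ethtool_sprintf(), which formats into the current ETH_GSTRING_LEN-sized slot and advances the cursor in one step. A stand-alone sketch of that pointer-advancing formatter; gstring_printf() is an invented name, and ETH_GSTRING_LEN is 32 in the ethtool ABI:

    #include <stdarg.h>
    #include <stdio.h>

    #define GSTRING_LEN 32  /* ETH_GSTRING_LEN in the ethtool ABI */

    /* format into the current fixed-width slot, then advance the cursor;
     * this mirrors what the kernel's ethtool_sprintf() does for the driver
     */
    static void gstring_printf(char **data, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vsnprintf(*data, GSTRING_LEN, fmt, args);
            va_end(args);

            *data += GSTRING_LEN;
    }

    int main(void)
    {
            char table[4 * GSTRING_LEN] = { 0 };
            char *p = table;

            for (unsigned int i = 0; i < 2; i++) {
                    gstring_printf(&p, "tx_queue_%u_packets", i);
                    gstring_printf(&p, "tx_queue_%u_bytes", i);
            }
            printf("%s / %s\n", table, table + GSTRING_LEN);
            return 0;
    }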
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f1adf154ec4a..95323095094d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -11,7 +11,7 @@
 #include <linux/pm_runtime.h>
 #include <net/pkt_sched.h>
 #include <linux/bpf_trace.h>
-
+#include <net/xdp_sock_drv.h>
 #include <net/ipv6.h>
 
 #include "igc.h"
@@ -111,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
 	if (!netif_running(adapter->netdev))
 		igc_power_down_phy_copper_base(&adapter->hw);
 
+	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
+	wr32(IGC_VET, ETH_P_8021Q);
+
 	/* Re-enable PTP, where applicable. */
 	igc_ptp_reset(adapter);
@@ -171,6 +174,14 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
 	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
 }
 
+static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
+{
+	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
+
+	dma_unmap_len_set(buf, len, 0);
+}
+
 /**
  * igc_clean_tx_ring - Free Tx Buffers
  * @tx_ring: ring to be cleaned
@@ -179,20 +190,27 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+	u32 xsk_frames = 0;
 
 	while (i != tx_ring->next_to_use) {
 		union igc_adv_tx_desc *eop_desc, *tx_desc;
 
-		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+		switch (tx_buffer->type) {
+		case IGC_TX_BUFFER_TYPE_XSK:
+			xsk_frames++;
+			break;
+		case IGC_TX_BUFFER_TYPE_XDP:
 			xdp_return_frame(tx_buffer->xdpf);
-		else
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+			break;
+		case IGC_TX_BUFFER_TYPE_SKB:
 			dev_kfree_skb_any(tx_buffer->skb);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buffer, dma),
-				 dma_unmap_len(tx_buffer, len),
-				 DMA_TO_DEVICE);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+			break;
+		default:
+			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+			break;
+		}
 
 		/* check for eop_desc to determine the end of the packet */
 		eop_desc = tx_buffer->next_to_watch;
@@ -211,10 +229,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 
 			/* unmap any remaining paged data */
 			if (dma_unmap_len(tx_buffer, len))
-				dma_unmap_page(tx_ring->dev,
-					       dma_unmap_addr(tx_buffer, dma),
-					       dma_unmap_len(tx_buffer, len),
-					       DMA_TO_DEVICE);
+				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 		}
 
 		/* move us one more past the eop_desc for start of next pkt */
@@ -226,6 +241,9 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 		}
 	}
 
+	if (tx_ring->xsk_pool && xsk_frames)
+		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
 	/* reset BQL for queue */
 	netdev_tx_reset_queue(txring_txq(tx_ring));
 
@@ -346,11 +364,7 @@ static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
 	return err;
 }
 
-/**
- * igc_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- */
-static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
 {
 	u16 i = rx_ring->next_to_clean;
 
@@ -383,12 +397,39 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
 		if (i == rx_ring->count)
 			i = 0;
 	}
+}
+
+static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
+{
+	struct igc_rx_buffer *bi;
+	u16 i;
+
+	for (i = 0; i < ring->count; i++) {
+		bi = &ring->rx_buffer_info[i];
+		if (!bi->xdp)
+			continue;
+
+		xsk_buff_free(bi->xdp);
+		bi->xdp = NULL;
+	}
+}
 
-	clear_ring_uses_large_buffer(rx_ring);
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *ring)
+{
+	if (ring->xsk_pool)
+		igc_clean_rx_ring_xsk_pool(ring);
+	else
+		igc_clean_rx_ring_page_shared(ring);
 
-	rx_ring->next_to_alloc = 0;
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
+	clear_ring_uses_large_buffer(ring);
+
+	ring->next_to_alloc = 0;
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
 }
 
 /**
@@ -414,7 +455,7 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
 {
 	igc_clean_rx_ring(rx_ring);
 
-	igc_xdp_unregister_rxq_info(rx_ring);
+	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
@@ -453,11 +494,16 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 {
 	struct net_device *ndev = rx_ring->netdev;
 	struct device *dev = rx_ring->dev;
+	u8 index = rx_ring->queue_index;
 	int size, desc_len, res;
 
-	res = igc_xdp_register_rxq_info(rx_ring);
-	if (res < 0)
+	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
+			       rx_ring->q_vector->napi.napi_id);
+	if (res < 0) {
+		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
+			   index);
 		return res;
+	}
 
 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
 	rx_ring->rx_buffer_info = vzalloc(size);
@@ -483,7 +529,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 	return 0;
 
 err:
-	igc_xdp_unregister_rxq_info(rx_ring);
+	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -515,9 +561,14 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
 	return err;
 }
 
-static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
+					      struct igc_ring *ring)
 {
-	return !!adapter->xdp_prog;
+	if (!igc_xdp_is_enabled(adapter) ||
+	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
+		return NULL;
+
+	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
 }
 
 /**
@@ -535,6 +586,20 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
 	int reg_idx = ring->reg_idx;
 	u32 srrctl = 0, rxdctl = 0;
 	u64 rdba = ring->dma;
+	u32 buf_size;
+
+	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+	if (ring->xsk_pool) {
+		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+						   MEM_TYPE_XSK_BUFF_POOL,
+						   NULL));
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+	} else {
+		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+						   MEM_TYPE_PAGE_SHARED,
+						   NULL));
+	}
 
 	if (igc_xdp_is_enabled(adapter))
 		set_ring_uses_large_buffer(ring);
@@ -558,12 +623,15 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
 
-	/* set descriptor configuration */
-	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	if (ring_uses_large_buffer(ring))
-		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring->xsk_pool)
+		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+	else if (ring_uses_large_buffer(ring))
+		buf_size = IGC_RXBUFFER_3072;
 	else
-		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+		buf_size = IGC_RXBUFFER_2048;
+
+	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 	wr32(IGC_SRRCTL(reg_idx), srrctl);
@@ -618,6 +686,8 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
 	u64 tdba = ring->dma;
 	u32 txdctl = 0;
 
+	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+
 	/* disable the queue */
 	wr32(IGC_TXDCTL(reg_idx), 0);
 	wrfl();
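With a pool attached, igc_configure_rx_ring() now derives the packet buffer size from the XSK frame size instead of hardcoding 2K/3K, and encodes it into SRRCTL. The BSIZEPKT field is expressed in 1 KB units, hence the right shift by IGC_SRRCTL_BSIZEPKT_SHIFT (10). A tiny stand-alone check of the encoding; the buffer sizes below are illustrative, not a claim about every XSK chunk size:

    #include <stdio.h>

    #define SRRCTL_BSIZEPKT_SHIFT 10  /* field is in 1 KB units */

    int main(void)
    {
            /* 2K default, 3K large-buffer XDP, 4K as a typical XSK chunk */
            unsigned int sizes[] = { 2048, 3072, 4096 };

            for (int i = 0; i < 3; i++)
                    printf("buf_size %u -> BSIZEPKT field %u\n",
                           sizes[i], sizes[i] >> SRRCTL_BSIZEPKT_SHIFT);
            return 0;
    }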
@@ -1055,13 +1125,17 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
 	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
 	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
 
-static u32 igc_tx_cmd_type(u32 tx_flags)
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 {
 	/* set type for advanced descriptor with frame checksum insertion */
 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
 		       IGC_ADVTXD_DCMD_DEXT |
 		       IGC_ADVTXD_DCMD_IFCS;
 
+	/* set HW vlan bit if vlan is present */
+	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
+				 IGC_ADVTXD_DCMD_VLE);
+
 	/* set segmentation bits for TSO */
 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
 				 (IGC_ADVTXD_DCMD_TSE));
@@ -1070,6 +1144,9 @@ static u32 igc_tx_cmd_type(u32 tx_flags)
 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
 				 (IGC_ADVTXD_MAC_TSTAMP));
 
+	/* insert frame checksum */
+	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
+
 	return cmd_type;
 }
 
@@ -1104,8 +1181,9 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	u16 i = tx_ring->next_to_use;
 	unsigned int data_len, size;
 	dma_addr_t dma;
-	u32 cmd_type = igc_tx_cmd_type(tx_flags);
+	u32 cmd_type;
 
+	cmd_type = igc_tx_cmd_type(skb, tx_flags);
 	tx_desc = IGC_TX_DESC(tx_ring, i);
 
 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
@@ -1211,11 +1289,7 @@ dma_error:
 	/* clear dma mappings for failed tx_buffer_info map */
 	while (tx_buffer != first) {
 		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_page(tx_ring->dev,
-				       dma_unmap_addr(tx_buffer, dma),
-				       dma_unmap_len(tx_buffer, len),
-				       DMA_TO_DEVICE);
-		dma_unmap_len_set(tx_buffer, len, 0);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
 		if (i-- == 0)
 			i += tx_ring->count;
@@ -1223,11 +1297,7 @@ dma_error:
 	}
 
 	if (dma_unmap_len(tx_buffer, len))
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buffer, dma),
-				 dma_unmap_len(tx_buffer, len),
-				 DMA_TO_DEVICE);
-	dma_unmap_len_set(tx_buffer, len, 0);
+		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
 	dev_kfree_skb_any(tx_buffer->skb);
 	tx_buffer->skb = NULL;
@@ -1359,6 +1429,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->type = IGC_TX_BUFFER_TYPE_SKB;
 	first->skb = skb;
 	first->bytecount = skb->len;
 	first->gso_segs = 1;
@@ -1383,6 +1454,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 		}
 	}
 
+	if (skb_vlan_tag_present(skb)) {
+		tx_flags |= IGC_TX_FLAGS_VLAN;
+		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
+	}
+
 	/* record initial flags and protocol */
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
@@ -1482,6 +1558,25 @@ static inline void igc_rx_hash(struct igc_ring *ring,
 			     PKT_HASH_TYPE_L3);
 }
 
+static void igc_rx_vlan(struct igc_ring *rx_ring,
+			union igc_adv_rx_desc *rx_desc,
+			struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	u16 vid;
+
+	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
+		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
+		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
+		else
+			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+	}
+}
+
 /**
  * igc_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1500,11 +1595,37 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
 
 	igc_rx_checksum(rx_ring, rx_desc, skb);
 
+	igc_rx_vlan(rx_ring, rx_desc, skb);
+
 	skb_record_rx_queue(skb, rx_ring->queue_index);
 
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
+static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
+{
+	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+	struct igc_adapter *adapter = netdev_priv(netdev);
+	struct igc_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	ctrl = rd32(IGC_CTRL);
+
+	if (enable) {
+		/* enable VLAN tag insert/strip */
+		ctrl |= IGC_CTRL_VME;
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl &= ~IGC_CTRL_VME;
+	}
+	wr32(IGC_CTRL, ctrl);
+}
+
+static void igc_restore_vlan(struct igc_adapter *adapter)
+{
+	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
 					       const unsigned int size,
 					       int *rx_buffer_pgcnt)
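The transmit path keeps the 802.1Q tag out of band: the 16-bit TCI taken from the skb is packed into the upper half of tx_flags (mask 0xffff0000, shift 16), while the low bits stay free for flag bits such as IGC_TX_FLAGS_VLAN. A stand-alone sketch of the packing; the flag value below is made up for the example and is not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define TX_FLAGS_VLAN           0x02        /* example flag bit only */
    #define TX_FLAGS_VLAN_MASK      0xffff0000u
    #define TX_FLAGS_VLAN_SHIFT     16

    int main(void)
    {
            uint16_t tci = (3 << 13) | 100;  /* PCP 3, VLAN ID 100 */
            uint32_t tx_flags = 0;

            /* pack: flag bit below, full TCI in the upper 16 bits */
            tx_flags |= TX_FLAGS_VLAN;
            tx_flags |= (uint32_t)tci << TX_FLAGS_VLAN_SHIFT;

            /* unpack on the descriptor-writing side */
            printf("flags=%#x tci=%#x\n", tx_flags,
                   (tx_flags & TX_FLAGS_VLAN_MASK) >> TX_FLAGS_VLAN_SHIFT);
            return 0;
    }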
@@ -1930,6 +2051,63 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
 	}
 }
 
+static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
+{
+	union igc_adv_rx_desc *desc;
+	u16 i = ring->next_to_use;
+	struct igc_rx_buffer *bi;
+	dma_addr_t dma;
+	bool ok = true;
+
+	if (!count)
+		return ok;
+
+	desc = IGC_RX_DESC(ring, i);
+	bi = &ring->rx_buffer_info[i];
+	i -= ring->count;
+
+	do {
+		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
+		if (!bi->xdp) {
+			ok = false;
+			break;
+		}
+
+		dma = xsk_buff_xdp_get_dma(bi->xdp);
+		desc->read.pkt_addr = cpu_to_le64(dma);
+
+		desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			desc = IGC_RX_DESC(ring, 0);
+			bi = ring->rx_buffer_info;
+			i -= ring->count;
+		}
+
+		/* Clear the length for the next_to_use descriptor. */
+		desc->wb.upper.length = 0;
+
+		count--;
+	} while (count);
+
+	i += ring->count;
+
+	if (ring->next_to_use != i) {
+		ring->next_to_use = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, ring->tail);
+	}
+
+	return ok;
+}
+
 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
 				  struct xdp_frame *xdpf,
 				  struct igc_ring *ring)
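igc_alloc_rx_buffers_zc() above uses the Intel drivers' usual index trick: the running index is biased by -ring->count so the hot loop detects wrap-around with a simple !i test instead of comparing against the ring size every iteration. A stand-alone demonstration of the idiom on a toy 8-entry ring:

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 8

    int main(void)
    {
            int filled[RING_SIZE] = { 0 };
            uint16_t i = 6;     /* next_to_use */
            uint16_t todo = 5;  /* descriptors to fill */

            i -= RING_SIZE;     /* bias: u16 wraps, i now counts up toward 0 */

            do {
                    /* undo the bias to get the real slot index */
                    filled[(uint16_t)(i + RING_SIZE)] = 1;

                    i++;
                    if (!i)     /* i reached 0: we walked off the ring end */
                            i -= RING_SIZE;

                    todo--;
            } while (todo);

            i += RING_SIZE;     /* new next_to_use */
            printf("next_to_use = %u\n", i);

            for (int s = 0; s < RING_SIZE; s++)
                    printf("slot %d: %s\n", s, filled[s] ? "filled" : "-");
            return 0;
    }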
@@ -1942,8 +2120,8 @@ static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
 		return -ENOMEM;
 	}
 
+	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
 	buffer->xdpf = xdpf;
-	buffer->tx_flags = IGC_TX_FLAGS_XDP;
 	buffer->protocol = 0;
 	buffer->bytecount = xdpf->len;
 	buffer->gso_segs = 1;
@@ -2025,35 +2203,24 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
 	return res;
 }
 
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
-					struct xdp_buff *xdp)
+/* This function assumes rcu_read_lock() is held by the caller. */
+static int __igc_xdp_run_prog(struct igc_adapter *adapter,
+			      struct bpf_prog *prog,
+			      struct xdp_buff *xdp)
 {
-	struct bpf_prog *prog;
-	int res;
-	u32 act;
+	u32 act = bpf_prog_run_xdp(prog, xdp);
 
-	rcu_read_lock();
-
-	prog = READ_ONCE(adapter->xdp_prog);
-	if (!prog) {
-		res = IGC_XDP_PASS;
-		goto unlock;
-	}
-
-	act = bpf_prog_run_xdp(prog, xdp);
 	switch (act) {
 	case XDP_PASS:
-		res = IGC_XDP_PASS;
-		break;
+		return IGC_XDP_PASS;
 	case XDP_TX:
 		if (igc_xdp_xmit_back(adapter, xdp) < 0)
 			goto out_failure;
-		res = IGC_XDP_TX;
-		break;
+		return IGC_XDP_TX;
 	case XDP_REDIRECT:
 		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
 			goto out_failure;
-		res = IGC_XDP_REDIRECT;
+		return IGC_XDP_REDIRECT;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2063,12 +2230,25 @@ out_failure:
 		trace_xdp_exception(adapter->netdev, prog, act);
 		fallthrough;
 	case XDP_DROP:
-		res = IGC_XDP_CONSUMED;
-		break;
+		return IGC_XDP_CONSUMED;
 	}
+}
 
-unlock:
-	rcu_read_unlock();
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+					struct xdp_buff *xdp)
+{
+	struct bpf_prog *prog;
+	int res;
+
+	prog = READ_ONCE(adapter->xdp_prog);
+	if (!prog) {
+		res = IGC_XDP_PASS;
+		goto out;
+	}
+
+	res = __igc_xdp_run_prog(adapter, prog, xdp);
+
+out:
 	return ERR_PTR(-res);
 }
 
@@ -2102,6 +2282,20 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
 		xdp_do_flush();
 }
 
+static void igc_update_rx_stats(struct igc_q_vector *q_vector,
+				unsigned int packets, unsigned int bytes)
+{
+	struct igc_ring *ring = q_vector->rx.ring;
+
+	u64_stats_update_begin(&ring->rx_syncp);
+	ring->rx_stats.packets += packets;
+	ring->rx_stats.bytes += bytes;
+	u64_stats_update_end(&ring->rx_syncp);
+
+	q_vector->rx.total_packets += packets;
+	q_vector->rx.total_bytes += bytes;
+}
+
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 {
 	unsigned int total_bytes = 0, total_packets = 0;
@@ -2150,12 +2344,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		}
 
 		if (!skb) {
-			xdp.data = pktbuf + pkt_offset;
-			xdp.data_end = xdp.data + size;
-			xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
-			xdp_set_data_meta_invalid(&xdp);
-			xdp.frame_sz = truesize;
-			xdp.rxq = &rx_ring->xdp_rxq;
+			xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
+			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+					 igc_rx_offset(rx_ring) + pkt_offset, size, false);
 
 			skb = igc_xdp_run_prog(adapter, &xdp);
 		}
@@ -2225,12 +2416,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
-	u64_stats_update_begin(&rx_ring->rx_syncp);
-	rx_ring->rx_stats.packets += total_packets;
-	rx_ring->rx_stats.bytes += total_bytes;
-	u64_stats_update_end(&rx_ring->rx_syncp);
-	q_vector->rx.total_packets += total_packets;
-	q_vector->rx.total_bytes += total_bytes;
+	igc_update_rx_stats(q_vector, total_packets, total_bytes);
 
 	if (cleaned_count)
 		igc_alloc_rx_buffers(rx_ring, cleaned_count);
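__igc_xdp_run_prog() only turns the program's verdict into driver actions; for AF_XDP, the program attached via XDP_SETUP_PROG is typically the standard XSKMAP redirector, which steers each frame to the socket bound to its rx queue. A minimal version of that conventional program, built with clang -target bpf; it is not part of this patch:

    // SPDX-License-Identifier: GPL-2.0
    /* minimal AF_XDP steering program: redirect each packet to the AF_XDP
     * socket bound to its rx queue, or fall back to XDP_PASS if none is
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_XSKMAP);
            __uint(max_entries, 64);
            __type(key, __u32);
            __type(value, __u32);
    } xsks_map SEC(".maps");

    SEC("xdp")
    int xdp_sock_prog(struct xdp_md *ctx)
    {
            return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
    }

    char _license[] SEC("license") = "GPL";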
@@ -2238,6 +2424,221 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 	return total_packets;
 }
 
+static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+					    struct xdp_buff *xdp)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	unsigned int datasize = xdp->data_end - xdp->data;
+	unsigned int totalsize = metasize + datasize;
+	struct sk_buff *skb;
+
+	skb = __napi_alloc_skb(&ring->q_vector->napi,
+			       xdp->data_end - xdp->data_hard_start,
+			       GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+	memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
+	return skb;
+}
+
+static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
+				union igc_adv_rx_desc *desc,
+				struct xdp_buff *xdp,
+				ktime_t timestamp)
+{
+	struct igc_ring *ring = q_vector->rx.ring;
+	struct sk_buff *skb;
+
+	skb = igc_construct_skb_zc(ring, xdp);
+	if (!skb) {
+		ring->rx_stats.alloc_failed++;
+		return;
+	}
+
+	if (timestamp)
+		skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+	if (igc_cleanup_headers(ring, desc, skb))
+		return;
+
+	igc_process_skb_fields(ring, desc, skb);
+	napi_gro_receive(&q_vector->napi, skb);
+}
+
+static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+{
+	struct igc_adapter *adapter = q_vector->adapter;
+	struct igc_ring *ring = q_vector->rx.ring;
+	u16 cleaned_count = igc_desc_unused(ring);
+	int total_bytes = 0, total_packets = 0;
+	u16 ntc = ring->next_to_clean;
+	struct bpf_prog *prog;
+	bool failure = false;
+	int xdp_status = 0;
+
+	rcu_read_lock();
+
+	prog = READ_ONCE(adapter->xdp_prog);
+
+	while (likely(total_packets < budget)) {
+		union igc_adv_rx_desc *desc;
+		struct igc_rx_buffer *bi;
+		ktime_t timestamp = 0;
+		unsigned int size;
+		int res;
+
+		desc = IGC_RX_DESC(ring, ntc);
+		size = le16_to_cpu(desc->wb.upper.length);
+		if (!size)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		bi = &ring->rx_buffer_info[ntc];
+
+		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
+			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
+							bi->xdp->data);
+
+			bi->xdp->data += IGC_TS_HDR_LEN;
+
+			/* HW timestamp has been copied into local variable. Metadata
+			 * length when XDP program is called should be 0.
+			 */
+			bi->xdp->data_meta += IGC_TS_HDR_LEN;
+			size -= IGC_TS_HDR_LEN;
+		}
+
+		bi->xdp->data_end = bi->xdp->data + size;
+		xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+
+		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
+		switch (res) {
+		case IGC_XDP_PASS:
+			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+			fallthrough;
+		case IGC_XDP_CONSUMED:
+			xsk_buff_free(bi->xdp);
+			break;
+		case IGC_XDP_TX:
+		case IGC_XDP_REDIRECT:
+			xdp_status |= res;
+			break;
+		}
+
+		bi->xdp = NULL;
+		total_bytes += size;
+		total_packets++;
+		cleaned_count++;
+		ntc++;
+		if (ntc == ring->count)
+			ntc = 0;
+	}
+
+	ring->next_to_clean = ntc;
+	rcu_read_unlock();
+
+	if (cleaned_count >= IGC_RX_BUFFER_WRITE)
+		failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
+
+	if (xdp_status)
+		igc_finalize_xdp(adapter, xdp_status);
+
+	igc_update_rx_stats(q_vector, total_packets, total_bytes);
+
+	if (xsk_uses_need_wakeup(ring->xsk_pool)) {
+		if (failure || ring->next_to_clean == ring->next_to_use)
+			xsk_set_rx_need_wakeup(ring->xsk_pool);
+		else
+			xsk_clear_rx_need_wakeup(ring->xsk_pool);
+		return total_packets;
+	}
+
+	return failure ? budget : total_packets;
+}
+
+static void igc_update_tx_stats(struct igc_q_vector *q_vector,
+				unsigned int packets, unsigned int bytes)
+{
+	struct igc_ring *ring = q_vector->tx.ring;
+
+	u64_stats_update_begin(&ring->tx_syncp);
+	ring->tx_stats.bytes += bytes;
+	ring->tx_stats.packets += packets;
+	u64_stats_update_end(&ring->tx_syncp);
+
+	q_vector->tx.total_bytes += bytes;
+	q_vector->tx.total_packets += packets;
+}
+
+static void igc_xdp_xmit_zc(struct igc_ring *ring)
+{
+	struct xsk_buff_pool *pool = ring->xsk_pool;
+	struct netdev_queue *nq = txring_txq(ring);
+	union igc_adv_tx_desc *tx_desc = NULL;
+	int cpu = smp_processor_id();
+	u16 ntu = ring->next_to_use;
+	struct xdp_desc xdp_desc;
+	u16 budget;
+
+	if (!netif_carrier_ok(ring->netdev))
+		return;
+
+	__netif_tx_lock(nq, cpu);
+
+	budget = igc_desc_unused(ring);
+
+	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+		u32 cmd_type, olinfo_status;
+		struct igc_tx_buffer *bi;
+		dma_addr_t dma;
+
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+			   xdp_desc.len;
+		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
+
+		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+
+		tx_desc = IGC_TX_DESC(ring, ntu);
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		bi = &ring->tx_buffer_info[ntu];
+		bi->type = IGC_TX_BUFFER_TYPE_XSK;
+		bi->protocol = 0;
+		bi->bytecount = xdp_desc.len;
+		bi->gso_segs = 1;
+		bi->time_stamp = jiffies;
+		bi->next_to_watch = tx_desc;
+
+		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
+
+		ntu++;
+		if (ntu == ring->count)
+			ntu = 0;
+	}
+
+	ring->next_to_use = ntu;
+	if (tx_desc) {
+		igc_flush_tx_descriptors(ring);
+		xsk_tx_release(pool);
+	}
+
+	__netif_tx_unlock(nq);
+}
+
 /**
  * igc_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
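igc_clean_rx_irq_zc() ends by setting or clearing the pool's need_wakeup flag, which tells user space whether it must issue a syscall before the driver will make further progress. The matching user-space side, sketched with the libxdp xsk helpers (fq and xsk are assumed to be set up elsewhere; see the setup sketch at the end of this diff):

    #include <sys/socket.h>
    #include <xdp/xsk.h>    /* or bpf/xsk.h with older libbpf */

    /* fq is the UMEM fill ring; xsk is the AF_XDP socket. The point of the
     * need_wakeup protocol is to pay for a syscall only when the driver
     * actually asked for one.
     */
    static void rx_kick_if_needed(struct xsk_ring_prod *fq, struct xsk_socket *xsk)
    {
            if (xsk_ring_prod__needs_wakeup(fq))
                    /* ends up in igc_xsk_wakeup() via ndo_xsk_wakeup */
                    recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
                             NULL, NULL);
    }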
@@ -2254,6 +2655,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 	unsigned int i = tx_ring->next_to_clean;
 	struct igc_tx_buffer *tx_buffer;
 	union igc_adv_tx_desc *tx_desc;
+	u32 xsk_frames = 0;
 
 	if (test_bit(__IGC_DOWN, &adapter->state))
 		return true;
@@ -2283,19 +2685,22 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
-		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+		switch (tx_buffer->type) {
+		case IGC_TX_BUFFER_TYPE_XSK:
+			xsk_frames++;
+			break;
+		case IGC_TX_BUFFER_TYPE_XDP:
 			xdp_return_frame(tx_buffer->xdpf);
-		else
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+			break;
+		case IGC_TX_BUFFER_TYPE_SKB:
 			napi_consume_skb(tx_buffer->skb, napi_budget);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_ring->dev,
-				 dma_unmap_addr(tx_buffer, dma),
-				 dma_unmap_len(tx_buffer, len),
-				 DMA_TO_DEVICE);
-
-		/* clear tx_buffer data */
-		dma_unmap_len_set(tx_buffer, len, 0);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+			break;
+		default:
+			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+			break;
+		}
 
 		/* clear last DMA location and unmap remaining buffers */
 		while (tx_desc != eop_desc) {
@@ -2309,13 +2714,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 			}
 
 			/* unmap any remaining paged data */
-			if (dma_unmap_len(tx_buffer, len)) {
-				dma_unmap_page(tx_ring->dev,
-					       dma_unmap_addr(tx_buffer, dma),
-					       dma_unmap_len(tx_buffer, len),
-					       DMA_TO_DEVICE);
-				dma_unmap_len_set(tx_buffer, len, 0);
-			}
+			if (dma_unmap_len(tx_buffer, len))
+				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 		}
 
 		/* move us one more past the eop_desc for start of next pkt */
@@ -2340,12 +2740,16 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 
-	u64_stats_update_begin(&tx_ring->tx_syncp);
-	tx_ring->tx_stats.bytes += total_bytes;
-	tx_ring->tx_stats.packets += total_packets;
-	u64_stats_update_end(&tx_ring->tx_syncp);
-	q_vector->tx.total_bytes += total_bytes;
-	q_vector->tx.total_packets += total_packets;
+
+	igc_update_tx_stats(q_vector, total_packets, total_bytes);
+
+	if (tx_ring->xsk_pool) {
+		if (xsk_frames)
+			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+		igc_xdp_xmit_zc(tx_ring);
+	}
 
 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
 		struct igc_hw *hw = &adapter->hw;
@@ -2906,6 +3310,8 @@ static void igc_configure(struct igc_adapter *adapter)
 	igc_get_hw_control(adapter);
 	igc_set_rx_mode(netdev);
 
+	igc_restore_vlan(adapter);
+
 	igc_setup_tctl(adapter);
 	igc_setup_mrqc(adapter);
 	igc_setup_rctl(adapter);
@@ -2925,7 +3331,10 @@ static void igc_configure(struct igc_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igc_ring *ring = adapter->rx_ring[i];
 
-		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+		if (ring->xsk_pool)
+			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+		else
+			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
 	}
 }
 
@@ -3540,14 +3949,17 @@ static int igc_poll(struct napi_struct *napi, int budget)
 	struct igc_q_vector *q_vector = container_of(napi,
						     struct igc_q_vector,
						     napi);
+	struct igc_ring *rx_ring = q_vector->rx.ring;
 	bool clean_complete = true;
 	int work_done = 0;
 
 	if (q_vector->tx.ring)
 		clean_complete = igc_clean_tx_irq(q_vector, budget);
 
-	if (q_vector->rx.ring) {
-		int cleaned = igc_clean_rx_irq(q_vector, budget);
+	if (rx_ring) {
+		int cleaned = rx_ring->xsk_pool ?
+			      igc_clean_rx_irq_zc(q_vector, budget) :
+			      igc_clean_rx_irq(q_vector, budget);
 
 		work_done += cleaned;
 		if (cleaned >= budget)
@@ -4199,6 +4611,9 @@ static int igc_set_features(struct net_device *netdev,
 	netdev_features_t changed = netdev->features ^ features;
 	struct igc_adapter *adapter = netdev_priv(netdev);
 
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		igc_vlan_mode(netdev, features);
+
 	/* Add VLAN support */
 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
 		return 0;
@@ -5185,6 +5600,9 @@ static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 	switch (bpf->command) {
 	case XDP_SETUP_PROG:
 		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+	case XDP_SETUP_XSK_POOL:
+		return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
+					  bpf->xsk.queue_id);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -5230,6 +5648,43 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	return num_frames - drops;
 }
 
+static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+					struct igc_q_vector *q_vector)
+{
+	struct igc_hw *hw = &adapter->hw;
+	u32 eics = 0;
+
+	eics |= q_vector->eims_value;
+	wr32(IGC_EICS, eics);
+}
+
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+	struct igc_adapter *adapter = netdev_priv(dev);
+	struct igc_q_vector *q_vector;
+	struct igc_ring *ring;
+
+	if (test_bit(__IGC_DOWN, &adapter->state))
+		return -ENETDOWN;
+
+	if (!igc_xdp_is_enabled(adapter))
+		return -ENXIO;
+
+	if (queue_id >= adapter->num_rx_queues)
+		return -EINVAL;
+
+	ring = adapter->rx_ring[queue_id];
+
+	if (!ring->xsk_pool)
+		return -ENXIO;
+
+	q_vector = adapter->q_vector[queue_id];
+	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+		igc_trigger_rxtxq_interrupt(adapter, q_vector);
+
+	return 0;
+}
+
 static const struct net_device_ops igc_netdev_ops = {
 	.ndo_open		= igc_open,
 	.ndo_stop		= igc_close,
@@ -5245,6 +5700,7 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_setup_tc		= igc_setup_tc,
 	.ndo_bpf		= igc_bpf,
 	.ndo_xdp_xmit		= igc_xdp_xmit,
+	.ndo_xsk_wakeup		= igc_xsk_wakeup,
 };
 
 /* PCIe configuration access */
@@ -5484,11 +5940,15 @@ static int igc_probe(struct pci_dev *pdev,
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= NETIF_F_NTUPLE;
+	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features |= netdev->features;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->vlan_features |= netdev->features;
+
 	/* MTU range: 68 - 9216 */
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
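igc_xsk_wakeup() is the kernel end of the user-space kick: a poll() or sendto() on the AF_XDP socket lands here through ndo_xsk_wakeup, and the driver simply fires the queue pair's interrupt unless NAPI is already scheduled. The conventional user-space tx kick, again sketched with the libxdp helpers:

    #include <sys/socket.h>
    #include <xdp/xsk.h>    /* or bpf/xsk.h with older libbpf */

    /* after publishing descriptors to the tx ring, kick the kernel only if
     * the driver set the need_wakeup flag on the tx ring
     */
    static void tx_kick_if_needed(struct xsk_ring_prod *tx, struct xsk_socket *xsk)
    {
            if (xsk_ring_prod__needs_wakeup(tx))
                    /* reaches igc_xsk_wakeup(), which triggers the queue's
                     * interrupt unless NAPI is already scheduled
                     */
                    sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
                           NULL, 0);
    }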
@@ -5997,6 +6457,61 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw)
 	return adapter->netdev;
 }
 
+static void igc_disable_rx_ring_hw(struct igc_ring *ring)
+{
+	struct igc_hw *hw = &ring->q_vector->adapter->hw;
+	u8 idx = ring->reg_idx;
+	u32 rxdctl;
+
+	rxdctl = rd32(IGC_RXDCTL(idx));
+	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
+	rxdctl |= IGC_RXDCTL_SWFLUSH;
+	wr32(IGC_RXDCTL(idx), rxdctl);
+}
+
+void igc_disable_rx_ring(struct igc_ring *ring)
+{
+	igc_disable_rx_ring_hw(ring);
+	igc_clean_rx_ring(ring);
+}
+
+void igc_enable_rx_ring(struct igc_ring *ring)
+{
+	struct igc_adapter *adapter = ring->q_vector->adapter;
+
+	igc_configure_rx_ring(adapter, ring);
+
+	if (ring->xsk_pool)
+		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+	else
+		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+}
+
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+	struct igc_hw *hw = &ring->q_vector->adapter->hw;
+	u8 idx = ring->reg_idx;
+	u32 txdctl;
+
+	txdctl = rd32(IGC_TXDCTL(idx));
+	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+	txdctl |= IGC_TXDCTL_SWFLUSH;
+	wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+void igc_disable_tx_ring(struct igc_ring *ring)
+{
+	igc_disable_tx_ring_hw(ring);
+	igc_clean_tx_ring(ring);
+}
+
+void igc_enable_tx_ring(struct igc_ring *ring)
+{
+	struct igc_adapter *adapter = ring->q_vector->adapter;
+
+	igc_configure_tx_ring(adapter, ring);
+}
+
 /**
  * igc_init_module - Driver Registration Routine
  *
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index cc174853554b..0f82990567d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -10,8 +10,8 @@
 #define IGC_EECD		0x00010  /* EEPROM/Flash Control - RW */
 #define IGC_CTRL_EXT		0x00018  /* Extended Device Control - RW */
 #define IGC_MDIC		0x00020  /* MDI Control - RW */
-#define IGC_MDICNFG		0x00E04  /* MDC/MDIO Configuration - RW */
 #define IGC_CONNSW		0x00034  /* Copper/Fiber switch control - RW */
+#define IGC_VET			0x00038  /* VLAN Ether Type - RW */
 #define IGC_I225_PHPM		0x00E14  /* I225 PHY Power Management */
 #define IGC_GPHY_VERSION	0x0001E  /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 11133c4619bb..a8cf5374be47 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020, Intel Corporation. */
 
+#include <net/xdp_sock_drv.h>
+
 #include "igc.h"
 #include "igc_xdp.h"
 
@@ -32,29 +34,112 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 	return 0;
 }
 
-int igc_xdp_register_rxq_info(struct igc_ring *ring)
+static int igc_xdp_enable_pool(struct igc_adapter *adapter,
+			       struct xsk_buff_pool *pool, u16 queue_id)
 {
-	struct net_device *dev = ring->netdev;
+	struct net_device *ndev = adapter->netdev;
+	struct device *dev = &adapter->pdev->dev;
+	struct igc_ring *rx_ring, *tx_ring;
+	struct napi_struct *napi;
+	bool needs_reset;
+	u32 frame_size;
 	int err;
 
-	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
-	if (err) {
-		netdev_err(dev, "Failed to register xdp rxq info\n");
-		return err;
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	frame_size = xsk_pool_get_rx_frame_size(pool);
+	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
+		/* When XDP is enabled, the driver doesn't support frames that
+		 * span over multiple buffers. To avoid that, we check if xsk
+		 * frame size is big enough to fit the max ethernet frame size
+		 * + vlan double tagging.
+		 */
+		return -EOPNOTSUPP;
 	}
 
-	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
-					 NULL);
+	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
 	if (err) {
-		netdev_err(dev, "Failed to register xdp rxq mem model\n");
-		xdp_rxq_info_unreg(&ring->xdp_rxq);
+		netdev_err(ndev, "Failed to map xsk pool\n");
 		return err;
 	}
 
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	tx_ring = adapter->tx_ring[queue_id];
+	/* Rx and Tx rings share the same napi context. */
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		igc_disable_tx_ring(tx_ring);
+		napi_disable(napi);
+	}
+
+	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+		igc_enable_tx_ring(tx_ring);
+
+		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
+		if (err) {
+			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
+{
+	struct igc_ring *rx_ring, *tx_ring;
+	struct xsk_buff_pool *pool;
+	struct napi_struct *napi;
+	bool needs_reset;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+	if (!pool)
+		return -EINVAL;
+
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	tx_ring = adapter->tx_ring[queue_id];
+	/* Rx and Tx rings share the same napi context. */
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		igc_disable_tx_ring(tx_ring);
+		napi_disable(napi);
+	}
+
+	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+		igc_enable_tx_ring(tx_ring);
+	}
+
 	return 0;
 }
 
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id)
 {
-	xdp_rxq_info_unreg(&ring->xdp_rxq);
+	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
+		      igc_xdp_disable_pool(adapter, queue_id);
 }
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h
index cfecb515b718..a74e5487d199 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.h
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.h
@@ -6,8 +6,12 @@
 
 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 		     struct netlink_ext_ack *extack);
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id);
 
-int igc_xdp_register_rxq_info(struct igc_ring *ring);
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
+static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+	return !!adapter->xdp_prog;
+}
 
 #endif /* _IGC_XDP_H_ */
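Putting the pieces together: creating an AF_XDP socket bound to a queue is what drives igc_xdp_setup_pool() via the XDP_SETUP_XSK_POOL command, and closing the socket drives the disable path. A trimmed user-space setup sketch using the libxdp xsk API; the interface name and sizes are placeholders, and error handling is reduced to bail-outs:

    #include <stdlib.h>
    #include <unistd.h>
    #include <xdp/xsk.h>    /* libxdp; older setups use bpf/xsk.h from libbpf */

    #define NUM_FRAMES  4096
    #define FRAME_SIZE  XSK_UMEM__DEFAULT_FRAME_SIZE  /* 4096 */

    int main(void)
    {
            struct xsk_ring_prod fq, tx;
            struct xsk_ring_cons cq, rx;
            struct xsk_umem *umem;
            struct xsk_socket *xsk;
            void *bufs;

            /* the UMEM must be page-aligned; its chunk size becomes the rx
             * frame size that igc_xdp_enable_pool() validates
             */
            if (posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE))
                    return 1;
            if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
                                 &fq, &cq, NULL))
                    return 1;

            struct xsk_socket_config cfg = {
                    .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                    .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                    /* fail instead of silently falling back to copy mode */
                    .bind_flags = XDP_ZEROCOPY,
            };

            /* binding to ("enp3s0", queue 0) is what reaches the driver as
             * XDP_SETUP_XSK_POOL and flips IGC_RING_FLAG_AF_XDP_ZC
             */
            if (xsk_socket__create(&xsk, "enp3s0", 0, umem, &rx, &tx, &cfg))
                    return 1;

            /* ... fill fq, run the rx/tx loops with the wakeup helpers ... */

            /* teardown drives igc_xdp_disable_pool() */
            xsk_socket__delete(xsk);
            xsk_umem__delete(umem);
            free(bufs);
            return 0;
    }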