Diffstat (limited to 'drivers/net/ethernet/intel/igc')
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile       |   2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h          |  31
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h  |  68
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  |   2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c     |   6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c     | 538
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c      | 320
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h     |  10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.c      |  60
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.h      |  13
10 files changed, 950 insertions(+), 100 deletions(-)
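The bulk of the change wires basic XDP support into the driver (the new igc_xdp.c plus the .ndo_bpf and .ndo_xdp_xmit hooks in igc_main.c), while the igc_ptp.c and igc_defines.h changes expose the I225 SDP pins for auxiliary timestamping, periodic output and PPS. As a rough illustration only, not part of this patch, a trivial XDP program such as the following could be attached with iproute2 (for example "ip link set dev <iface> xdpdrv obj xdp_pass.o sec xdp") to exercise the new igc_xdp_set_prog() and igc_xdp_run_prog() paths; the file name xdp_pass.c and the interface name are placeholders.

/* xdp_pass.c - minimal XDP program that accepts every frame (illustrative
 * only, not part of this patch). Build with:
 *   clang -O2 -target bpf -c xdp_pass.c -o xdp_pass.o
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass(struct xdp_md *ctx)
{
	/* Let every frame continue up the normal stack path. */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";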
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 1c3051db9085..95d1e8c490a4 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -8,4 +8,4 @@  obj-$(CONFIG_IGC) += igc.o  igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o +igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 1b08a7dc7bc4..25871351730b 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -28,6 +28,11 @@ void igc_ethtool_set_ops(struct net_device *);  #define MAX_ETYPE_FILTER		8  #define IGC_RETA_SIZE			128 +/* SDP support */ +#define IGC_N_EXTTS	2 +#define IGC_N_PEROUT	2 +#define IGC_N_SDP	4 +  enum igc_mac_filter_type {  	IGC_MAC_FILTER_TYPE_DST = 0,  	IGC_MAC_FILTER_TYPE_SRC @@ -111,6 +116,8 @@ struct igc_ring {  			struct sk_buff *skb;  		};  	}; + +	struct xdp_rxq_info xdp_rxq;  } ____cacheline_internodealigned_in_smp;  /* Board specific private data structure */ @@ -219,6 +226,16 @@ struct igc_adapter {  	ktime_t ptp_reset_start; /* Reset time in clock mono */  	char fw_version[32]; + +	struct bpf_prog *xdp_prog; + +	bool pps_sys_wrap_on; + +	struct ptp_pin_desc sdp_config[IGC_N_SDP]; +	struct { +		struct timespec64 start; +		struct timespec64 period; +	} perout[IGC_N_PEROUT];  };  void igc_up(struct igc_adapter *adapter); @@ -373,6 +390,8 @@ enum igc_tx_flags {  	/* olinfo flags */  	IGC_TX_FLAGS_IPV4	= 0x10,  	IGC_TX_FLAGS_CSUM	= 0x20, + +	IGC_TX_FLAGS_XDP	= 0x100,  };  enum igc_boards { @@ -395,7 +414,10 @@ enum igc_boards {  struct igc_tx_buffer {  	union igc_adv_tx_desc *next_to_watch;  	unsigned long time_stamp; -	struct sk_buff *skb; +	union { +		struct sk_buff *skb; +		struct xdp_frame *xdpf; +	};  	unsigned int bytecount;  	u16 gso_segs;  	__be16 protocol; @@ -504,6 +526,10 @@ enum igc_ring_flags_t {  #define ring_uses_large_buffer(ring) \  	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ +	set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ +	clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)  #define ring_uses_build_skb(ring) \  	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) @@ -547,8 +573,7 @@ void igc_ptp_init(struct igc_adapter *adapter);  void igc_ptp_reset(struct igc_adapter *adapter);  void igc_ptp_suspend(struct igc_adapter *adapter);  void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, -			 struct sk_buff *skb); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);  int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);  int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);  void igc_ptp_tx_hang(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index b909f00a79e6..0103dda32f39 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -8,6 +8,8 @@  #define REQ_TX_DESCRIPTOR_MULTIPLE	8  #define REQ_RX_DESCRIPTOR_MULTIPLE	8 +#define IGC_CTRL_EXT_SDP2_DIR	0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR	0x00000800 /* SDP3 Data direction */  #define IGC_CTRL_EXT_DRV_LOAD	0x10000000 /* Drv 
loaded bit for FW */  /* Definitions for power management and wakeup registers */ @@ -96,6 +98,9 @@  #define IGC_CTRL_RFCE		0x08000000  /* Receive Flow Control enable */  #define IGC_CTRL_TFCE		0x10000000  /* Transmit flow control enable */ +#define IGC_CTRL_SDP0_DIR 0x00400000	/* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000	/* SDP1 Data direction */ +  /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */  #define MAX_JUMBO_FRAME_SIZE	0x2600 @@ -403,6 +408,64 @@  #define IGC_TSYNCTXCTL_START_SYNC		0x80000000  /* initiate sync */  #define IGC_TSYNCTXCTL_TXSYNSIG			0x00000020  /* Sample TX tstamp in PHY sop */ +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0	(0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1	(1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2	(2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3	(3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0	(0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1	(1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2	(2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3	(3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0	BIT(0)  /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1	BIT(1)  /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0	BIT(2)  /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_EN_CLK1	BIT(5)  /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_EN_TS0	BIT(8)  /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0	BIT(9)  /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1	BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1	BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG		BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1	BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2	BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3	BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR	BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0	BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0	(0u << 0)  /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1	(1u << 0)  /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2	(2u << 0)  /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3	(3u << 0)  /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN	(1u << 2)  /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0	(0u << 3)  /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1	(1u << 3)  /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2	(2u << 3)  /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3	(3u << 3)  /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN	(1u << 5)  /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0	(0u << 6)  /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1	(1u << 6)  /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0	(2u << 6)  /* Freq clock  0 is output on SDP0. 
*/ +#define IGC_TS_SDP0_SEL_FC1	(3u << 6)  /* Freq clock  1 is output on SDP0. */ +#define IGC_TS_SDP0_EN		(1u << 8)  /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0	(0u << 9)  /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1	(1u << 9)  /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0	(2u << 9)  /* Freq clock  0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1	(3u << 9)  /* Freq clock  1 is output on SDP1. */ +#define IGC_TS_SDP1_EN		(1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0	(0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1	(1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0	(2u << 12) /* Freq clock  0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1	(3u << 12) /* Freq clock  1 is output on SDP2. */ +#define IGC_TS_SDP2_EN		(1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0	(0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1	(1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0	(2u << 15) /* Freq clock  0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1	(3u << 15) /* Freq clock  1 is output on SDP3. */ +#define IGC_TS_SDP3_EN		(1u << 17) /* SDP3 is assigned to Tsync. */ +  /* Transmit Scheduling */  #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001  #define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008 @@ -441,11 +504,6 @@  #define MII_CR_RESTART_AUTO_NEG	0x0200  /* Restart auto negotiation */  #define MII_CR_POWER_DOWN	0x0800  /* Power down */  #define MII_CR_AUTO_NEG_EN	0x1000  /* Auto Neg Enable */ -#define MII_CR_LOOPBACK		0x4000  /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET		0x8000  /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000	0x0040 -#define MII_CR_SPEED_100	0x2000 -#define MII_CR_SPEED_10		0x0000  /* PHY Status Register */  #define MII_SR_LINK_STATUS	0x0004 /* Link Status 1 = link */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 8722294ab90c..9722449d7633 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -65,6 +65,8 @@ static const struct igc_stats igc_gstrings_stats[] = {  	IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),  	IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),  	IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +	IGC_STAT("tx_lpi_counter", stats.tlpic), +	IGC_STAT("rx_lpi_counter", stats.rlpic),  };  #define IGC_NETDEV_STAT(_net_stat) { \ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 7ec04e48860c..b2ef9fde97b3 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -6,7 +6,7 @@  #include "igc_hw.h"  /** - * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM   * @hw: pointer to the HW structure   *   * Acquire the necessary semaphores for exclusive access to the EEPROM. 
@@ -229,10 +229,11 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,  	if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||  	    words == 0) {  		hw_dbg("nvm parameter(s) out of bounds\n"); -		goto out; +		return ret_val;  	}  	for (i = 0; i < words; i++) { +		ret_val = -IGC_ERR_NVM;  		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |  			(data[i] << IGC_NVM_RW_REG_DATA) |  			IGC_NVM_RW_REG_START; @@ -254,7 +255,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,  		}  	} -out:  	return ret_val;  } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 4d989ebc9713..f1adf154ec4a 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -10,17 +10,24 @@  #include <linux/ip.h>  #include <linux/pm_runtime.h>  #include <net/pkt_sched.h> +#include <linux/bpf_trace.h>  #include <net/ipv6.h>  #include "igc.h"  #include "igc_hw.h"  #include "igc_tsn.h" +#include "igc_xdp.h"  #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"  #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define IGC_XDP_PASS		0 +#define IGC_XDP_CONSUMED	BIT(0) +#define IGC_XDP_TX		BIT(1) +#define IGC_XDP_REDIRECT	BIT(2) +  static int debug = -1;  MODULE_AUTHOR("Intel Corporation, <[email protected]>"); @@ -176,8 +183,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)  	while (i != tx_ring->next_to_use) {  		union igc_adv_tx_desc *eop_desc, *tx_desc; -		/* Free all the Tx ring sk_buffs */ -		dev_kfree_skb_any(tx_buffer->skb); +		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP) +			xdp_return_frame(tx_buffer->xdpf); +		else +			dev_kfree_skb_any(tx_buffer->skb);  		/* unmap skb header data */  		dma_unmap_single(tx_ring->dev, @@ -375,6 +384,8 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)  			i = 0;  	} +	clear_ring_uses_large_buffer(rx_ring); +  	rx_ring->next_to_alloc = 0;  	rx_ring->next_to_clean = 0;  	rx_ring->next_to_use = 0; @@ -403,6 +414,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)  {  	igc_clean_rx_ring(rx_ring); +	igc_xdp_unregister_rxq_info(rx_ring); +  	vfree(rx_ring->rx_buffer_info);  	rx_ring->rx_buffer_info = NULL; @@ -440,7 +453,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)  {  	struct net_device *ndev = rx_ring->netdev;  	struct device *dev = rx_ring->dev; -	int size, desc_len; +	int size, desc_len, res; + +	res = igc_xdp_register_rxq_info(rx_ring); +	if (res < 0) +		return res;  	size = sizeof(struct igc_rx_buffer) * rx_ring->count;  	rx_ring->rx_buffer_info = vzalloc(size); @@ -466,6 +483,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)  	return 0;  err: +	igc_xdp_unregister_rxq_info(rx_ring);  	vfree(rx_ring->rx_buffer_info);  	rx_ring->rx_buffer_info = NULL;  	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); @@ -497,6 +515,11 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)  	return err;  } +static bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ +	return !!adapter->xdp_prog; +} +  /**   * igc_configure_rx_ring - Configure a receive ring after Reset   * @adapter: board private structure @@ -513,6 +536,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,  	u32 srrctl = 0, rxdctl = 0;  	u64 rdba = ring->dma; +	if (igc_xdp_is_enabled(adapter)) +		set_ring_uses_large_buffer(ring); +  	/* disable the queue */  	wr32(IGC_RXDCTL(reg_idx), 0); @@ -941,7 +967,7 @@ static void igc_tx_ctxtdesc(struct 
igc_ring *tx_ring,  		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);  		ktime_t txtime = first->skb->tstamp; -		first->skb->tstamp = ktime_set(0, 0); +		skb_txtime_consumed(first->skb);  		context_desc->launch_time = igc_tx_launchtime(adapter,  							      txtime);  	} else { @@ -1029,7 +1055,7 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)  	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\  	 ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) -static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +static u32 igc_tx_cmd_type(u32 tx_flags)  {  	/* set type for advanced descriptor with frame checksum insertion */  	u32 cmd_type = IGC_ADVTXD_DTYP_DATA | @@ -1078,7 +1104,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,  	u16 i = tx_ring->next_to_use;  	unsigned int data_len, size;  	dma_addr_t dma; -	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags); +	u32 cmd_type = igc_tx_cmd_type(tx_flags);  	tx_desc = IGC_TX_DESC(tx_ring, i); @@ -1480,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,  }  static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, -					       const unsigned int size) +					       const unsigned int size, +					       int *rx_buffer_pgcnt)  {  	struct igc_rx_buffer *rx_buffer;  	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +	*rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) +		page_count(rx_buffer->page); +#else +		0; +#endif  	prefetchw(rx_buffer->page);  	/* we are reusing so sync this buffer for CPU use */ @@ -1499,6 +1532,32 @@ static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,  	return rx_buffer;  } +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, +			       unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) +	buffer->page_offset ^= truesize; +#else +	buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, +					      unsigned int size) +{ +	unsigned int truesize; + +#if (PAGE_SIZE < 8192) +	truesize = igc_rx_pg_size(ring) / 2; +#else +	truesize = ring_uses_build_skb(ring) ? +		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) : +		   SKB_DATA_ALIGN(size); +#endif +	return truesize; +} +  /**   * igc_add_rx_frag - Add contents of Rx buffer to sk_buff   * @rx_ring: rx descriptor ring to transact packets on @@ -1513,20 +1572,19 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,  			    struct sk_buff *skb,  			    unsigned int size)  { -#if (PAGE_SIZE < 8192) -	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; +	unsigned int truesize; -	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, -			rx_buffer->page_offset, size, truesize); -	rx_buffer->page_offset ^= truesize; +#if (PAGE_SIZE < 8192) +	truesize = igc_rx_pg_size(rx_ring) / 2;  #else -	unsigned int truesize = ring_uses_build_skb(rx_ring) ? -				SKB_DATA_ALIGN(IGC_SKB_PAD + size) : -				SKB_DATA_ALIGN(size); +	truesize = ring_uses_build_skb(rx_ring) ? 
+		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) : +		   SKB_DATA_ALIGN(size); +#endif  	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,  			rx_buffer->page_offset, size, truesize); -	rx_buffer->page_offset += truesize; -#endif + +	igc_rx_buffer_flip(rx_buffer, truesize);  }  static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, @@ -1535,12 +1593,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,  				     unsigned int size)  {  	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) -	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; -#else -	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + -				SKB_DATA_ALIGN(IGC_SKB_PAD + size); -#endif +	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);  	struct sk_buff *skb;  	/* prefetch first cache line of first page */ @@ -1555,27 +1608,18 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,  	skb_reserve(skb, IGC_SKB_PAD);  	__skb_put(skb, size); -	/* update buffer offset */ -#if (PAGE_SIZE < 8192) -	rx_buffer->page_offset ^= truesize; -#else -	rx_buffer->page_offset += truesize; -#endif - +	igc_rx_buffer_flip(rx_buffer, truesize);  	return skb;  }  static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,  					 struct igc_rx_buffer *rx_buffer, -					 union igc_adv_rx_desc *rx_desc, -					 unsigned int size) +					 struct xdp_buff *xdp, +					 ktime_t timestamp)  { -	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) -	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; -#else -	unsigned int truesize = SKB_DATA_ALIGN(size); -#endif +	unsigned int size = xdp->data_end - xdp->data; +	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); +	void *va = xdp->data;  	unsigned int headlen;  	struct sk_buff *skb; @@ -1587,11 +1631,8 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,  	if (unlikely(!skb))  		return NULL; -	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) { -		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); -		va += IGC_TS_HDR_LEN; -		size -= IGC_TS_HDR_LEN; -	} +	if (timestamp) +		skb_hwtstamps(skb)->hwtstamp = timestamp;  	/* Determine available headroom for copy */  	headlen = size; @@ -1607,11 +1648,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,  		skb_add_rx_frag(skb, 0, rx_buffer->page,  				(va + headlen) - page_address(rx_buffer->page),  				size, truesize); -#if (PAGE_SIZE < 8192) -		rx_buffer->page_offset ^= truesize; -#else -		rx_buffer->page_offset += truesize; -#endif +		igc_rx_buffer_flip(rx_buffer, truesize);  	} else {  		rx_buffer->pagecnt_bias++;  	} @@ -1648,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,  	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;  } -static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, +				  int rx_buffer_pgcnt)  {  	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;  	struct page *page = rx_buffer->page; @@ -1659,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)  #if (PAGE_SIZE < 8192)  	/* if we are only owner of page we can reuse it */ -	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))  		return false;  #else  #define IGC_LAST_OFFSET \ @@ -1673,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)  	 * the pagecnt_bias and page 
count so that we fully restock the  	 * number of references the driver holds.  	 */ -	if (unlikely(!pagecnt_bias)) { -		page_ref_add(page, USHRT_MAX); +	if (unlikely(pagecnt_bias == 1)) { +		page_ref_add(page, USHRT_MAX - 1);  		rx_buffer->pagecnt_bias = USHRT_MAX;  	} @@ -1726,6 +1764,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,  				union igc_adv_rx_desc *rx_desc,  				struct sk_buff *skb)  { +	/* XDP packets use error pointer so abort at this point */ +	if (IS_ERR(skb)) +		return true; +  	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {  		struct net_device *netdev = rx_ring->netdev; @@ -1743,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,  }  static void igc_put_rx_buffer(struct igc_ring *rx_ring, -			      struct igc_rx_buffer *rx_buffer) +			      struct igc_rx_buffer *rx_buffer, +			      int rx_buffer_pgcnt)  { -	if (igc_can_reuse_rx_page(rx_buffer)) { +	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {  		/* hand second half of page back to the ring */  		igc_reuse_rx_page(rx_ring, rx_buffer);  	} else { @@ -1765,7 +1808,14 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,  static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)  { -	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0; +	struct igc_adapter *adapter = rx_ring->q_vector->adapter; + +	if (ring_uses_build_skb(rx_ring)) +		return IGC_SKB_PAD; +	if (igc_xdp_is_enabled(adapter)) +		return XDP_PACKET_HEADROOM; + +	return 0;  }  static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, @@ -1804,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,  	bi->dma = dma;  	bi->page = page;  	bi->page_offset = igc_rx_offset(rx_ring); -	bi->pagecnt_bias = 1; +	page_ref_add(page, USHRT_MAX - 1); +	bi->pagecnt_bias = USHRT_MAX;  	return true;  } @@ -1879,17 +1930,195 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)  	}  } +static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, +				  struct xdp_frame *xdpf, +				  struct igc_ring *ring) +{ +	dma_addr_t dma; + +	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); +	if (dma_mapping_error(ring->dev, dma)) { +		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); +		return -ENOMEM; +	} + +	buffer->xdpf = xdpf; +	buffer->tx_flags = IGC_TX_FLAGS_XDP; +	buffer->protocol = 0; +	buffer->bytecount = xdpf->len; +	buffer->gso_segs = 1; +	buffer->time_stamp = jiffies; +	dma_unmap_len_set(buffer, len, xdpf->len); +	dma_unmap_addr_set(buffer, dma, dma); +	return 0; +} + +/* This function requires __netif_tx_lock is held by the caller. 
*/ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, +				      struct xdp_frame *xdpf) +{ +	struct igc_tx_buffer *buffer; +	union igc_adv_tx_desc *desc; +	u32 cmd_type, olinfo_status; +	int err; + +	if (!igc_desc_unused(ring)) +		return -EBUSY; + +	buffer = &ring->tx_buffer_info[ring->next_to_use]; +	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); +	if (err) +		return err; + +	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | +		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | +		   buffer->bytecount; +	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + +	desc = IGC_TX_DESC(ring, ring->next_to_use); +	desc->read.cmd_type_len = cpu_to_le32(cmd_type); +	desc->read.olinfo_status = cpu_to_le32(olinfo_status); +	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); + +	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); + +	buffer->next_to_watch = desc; + +	ring->next_to_use++; +	if (ring->next_to_use == ring->count) +		ring->next_to_use = 0; + +	return 0; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, +					    int cpu) +{ +	int index = cpu; + +	if (unlikely(index < 0)) +		index = 0; + +	while (index >= adapter->num_tx_queues) +		index -= adapter->num_tx_queues; + +	return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ +	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); +	int cpu = smp_processor_id(); +	struct netdev_queue *nq; +	struct igc_ring *ring; +	int res; + +	if (unlikely(!xdpf)) +		return -EFAULT; + +	ring = igc_xdp_get_tx_ring(adapter, cpu); +	nq = txring_txq(ring); + +	__netif_tx_lock(nq, cpu); +	res = igc_xdp_init_tx_descriptor(ring, xdpf); +	__netif_tx_unlock(nq); +	return res; +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, +					struct xdp_buff *xdp) +{ +	struct bpf_prog *prog; +	int res; +	u32 act; + +	rcu_read_lock(); + +	prog = READ_ONCE(adapter->xdp_prog); +	if (!prog) { +		res = IGC_XDP_PASS; +		goto unlock; +	} + +	act = bpf_prog_run_xdp(prog, xdp); +	switch (act) { +	case XDP_PASS: +		res = IGC_XDP_PASS; +		break; +	case XDP_TX: +		if (igc_xdp_xmit_back(adapter, xdp) < 0) +			goto out_failure; +		res = IGC_XDP_TX; +		break; +	case XDP_REDIRECT: +		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) +			goto out_failure; +		res = IGC_XDP_REDIRECT; +		break; +	default: +		bpf_warn_invalid_xdp_action(act); +		fallthrough; +	case XDP_ABORTED: +out_failure: +		trace_xdp_exception(adapter->netdev, prog, act); +		fallthrough; +	case XDP_DROP: +		res = IGC_XDP_CONSUMED; +		break; +	} + +unlock: +	rcu_read_unlock(); +	return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ +	/* Once tail pointer is updated, hardware can fetch the descriptors +	 * any time so we issue a write membar here to ensure all memory +	 * writes are complete before the tail pointer is updated. 
+	 */ +	wmb(); +	writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ +	int cpu = smp_processor_id(); +	struct netdev_queue *nq; +	struct igc_ring *ring; + +	if (status & IGC_XDP_TX) { +		ring = igc_xdp_get_tx_ring(adapter, cpu); +		nq = txring_txq(ring); + +		__netif_tx_lock(nq, cpu); +		igc_flush_tx_descriptors(ring); +		__netif_tx_unlock(nq); +	} + +	if (status & IGC_XDP_REDIRECT) +		xdp_do_flush(); +} +  static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  {  	unsigned int total_bytes = 0, total_packets = 0; +	struct igc_adapter *adapter = q_vector->adapter;  	struct igc_ring *rx_ring = q_vector->rx.ring;  	struct sk_buff *skb = rx_ring->skb;  	u16 cleaned_count = igc_desc_unused(rx_ring); +	int xdp_status = 0, rx_buffer_pgcnt;  	while (likely(total_packets < budget)) {  		union igc_adv_rx_desc *rx_desc;  		struct igc_rx_buffer *rx_buffer; -		unsigned int size; +		unsigned int size, truesize; +		ktime_t timestamp = 0; +		struct xdp_buff xdp; +		int pkt_offset = 0; +		void *pktbuf;  		/* return some buffers to hardware, one at a time is too slow */  		if (cleaned_count >= IGC_RX_BUFFER_WRITE) { @@ -1908,16 +2137,52 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  		 */  		dma_rmb(); -		rx_buffer = igc_get_rx_buffer(rx_ring, size); +		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); +		truesize = igc_get_rx_frame_truesize(rx_ring, size); + +		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + +		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { +			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, +							pktbuf); +			pkt_offset = IGC_TS_HDR_LEN; +			size -= IGC_TS_HDR_LEN; +		} + +		if (!skb) { +			xdp.data = pktbuf + pkt_offset; +			xdp.data_end = xdp.data + size; +			xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring); +			xdp_set_data_meta_invalid(&xdp); +			xdp.frame_sz = truesize; +			xdp.rxq = &rx_ring->xdp_rxq; + +			skb = igc_xdp_run_prog(adapter, &xdp); +		} + +		if (IS_ERR(skb)) { +			unsigned int xdp_res = -PTR_ERR(skb); + +			switch (xdp_res) { +			case IGC_XDP_CONSUMED: +				rx_buffer->pagecnt_bias++; +				break; +			case IGC_XDP_TX: +			case IGC_XDP_REDIRECT: +				igc_rx_buffer_flip(rx_buffer, truesize); +				xdp_status |= xdp_res; +				break; +			} -		/* retrieve a buffer from the ring */ -		if (skb) +			total_packets++; +			total_bytes += size; +		} else if (skb)  			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);  		else if (ring_uses_build_skb(rx_ring))  			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);  		else -			skb = igc_construct_skb(rx_ring, rx_buffer, -						rx_desc, size); +			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, +						timestamp);  		/* exit if we failed to retrieve a buffer */  		if (!skb) { @@ -1926,7 +2191,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  			break;  		} -		igc_put_rx_buffer(rx_ring, rx_buffer); +		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);  		cleaned_count++;  		/* fetch next buffer in frame if non-eop */ @@ -1954,6 +2219,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  		total_packets++;  	} +	if (xdp_status) +		igc_finalize_xdp(adapter, xdp_status); +  	/* place incomplete frames back on ring for completion */  	rx_ring->skb = skb; @@ -2015,8 +2283,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)  		total_bytes += tx_buffer->bytecount;  		
total_packets += tx_buffer->gso_segs; -		/* free the skb */ -		napi_consume_skb(tx_buffer->skb, napi_budget); +		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP) +			xdp_return_frame(tx_buffer->xdpf); +		else +			napi_consume_skb(tx_buffer->skb, napi_budget);  		/* unmap skb header data */  		dma_unmap_single(tx_ring->dev, @@ -3580,7 +3850,7 @@ void igc_up(struct igc_adapter *adapter)  	netif_tx_start_all_queues(adapter->netdev);  	/* start the watchdog. */ -	hw->mac.get_link_status = 1; +	hw->mac.get_link_status = true;  	schedule_work(&adapter->watchdog_task);  } @@ -3858,6 +4128,11 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)  	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;  	struct igc_adapter *adapter = netdev_priv(netdev); +	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { +		netdev_dbg(netdev, "Jumbo frames not supported with XDP"); +		return -EINVAL; +	} +  	/* adjust max frame to be at least the size of a standard frame */  	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))  		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; @@ -3974,9 +4249,20 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,  static void igc_tsync_interrupt(struct igc_adapter *adapter)  { +	u32 ack, tsauxc, sec, nsec, tsicr;  	struct igc_hw *hw = &adapter->hw; -	u32 tsicr = rd32(IGC_TSICR); -	u32 ack = 0; +	struct ptp_clock_event event; +	struct timespec64 ts; + +	tsicr = rd32(IGC_TSICR); +	ack = 0; + +	if (tsicr & IGC_TSICR_SYS_WRAP) { +		event.type = PTP_CLOCK_PPS; +		if (adapter->ptp_caps.pps) +			ptp_clock_event(adapter->ptp_clock, &event); +		ack |= IGC_TSICR_SYS_WRAP; +	}  	if (tsicr & IGC_TSICR_TXTS) {  		/* retrieve hardware timestamp */ @@ -3984,6 +4270,54 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)  		ack |= IGC_TSICR_TXTS;  	} +	if (tsicr & IGC_TSICR_TT0) { +		spin_lock(&adapter->tmreg_lock); +		ts = timespec64_add(adapter->perout[0].start, +				    adapter->perout[0].period); +		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); +		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); +		tsauxc = rd32(IGC_TSAUXC); +		tsauxc |= IGC_TSAUXC_EN_TT0; +		wr32(IGC_TSAUXC, tsauxc); +		adapter->perout[0].start = ts; +		spin_unlock(&adapter->tmreg_lock); +		ack |= IGC_TSICR_TT0; +	} + +	if (tsicr & IGC_TSICR_TT1) { +		spin_lock(&adapter->tmreg_lock); +		ts = timespec64_add(adapter->perout[1].start, +				    adapter->perout[1].period); +		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); +		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); +		tsauxc = rd32(IGC_TSAUXC); +		tsauxc |= IGC_TSAUXC_EN_TT1; +		wr32(IGC_TSAUXC, tsauxc); +		adapter->perout[1].start = ts; +		spin_unlock(&adapter->tmreg_lock); +		ack |= IGC_TSICR_TT1; +	} + +	if (tsicr & IGC_TSICR_AUTT0) { +		nsec = rd32(IGC_AUXSTMPL0); +		sec  = rd32(IGC_AUXSTMPH0); +		event.type = PTP_CLOCK_EXTTS; +		event.index = 0; +		event.timestamp = sec * NSEC_PER_SEC + nsec; +		ptp_clock_event(adapter->ptp_clock, &event); +		ack |= IGC_TSICR_AUTT0; +	} + +	if (tsicr & IGC_TSICR_AUTT1) { +		nsec = rd32(IGC_AUXSTMPL1); +		sec  = rd32(IGC_AUXSTMPH1); +		event.type = PTP_CLOCK_EXTTS; +		event.index = 1; +		event.timestamp = sec * NSEC_PER_SEC + nsec; +		ptp_clock_event(adapter->ptp_clock, &event); +		ack |= IGC_TSICR_AUTT1; +	} +  	/* acknowledge the interrupts */  	wr32(IGC_TSICR, ack);  } @@ -4009,7 +4343,7 @@ static irqreturn_t igc_msix_other(int irq, void *data)  	}  	if (icr & IGC_ICR_LSC) { -		hw->mac.get_link_status = 1; +		hw->mac.get_link_status = true;  		/* guard against interrupt when we're 
going down */  		if (!test_bit(__IGC_DOWN, &adapter->state))  			mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4387,7 +4721,7 @@ static irqreturn_t igc_intr_msi(int irq, void *data)  	}  	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { -		hw->mac.get_link_status = 1; +		hw->mac.get_link_status = true;  		if (!test_bit(__IGC_DOWN, &adapter->state))  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} @@ -4429,7 +4763,7 @@ static irqreturn_t igc_intr(int irq, void *data)  	}  	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { -		hw->mac.get_link_status = 1; +		hw->mac.get_link_status = true;  		/* guard against interrupt when we're going down */  		if (!test_bit(__IGC_DOWN, &adapter->state))  			mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4583,7 +4917,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)  	netif_tx_start_all_queues(netdev);  	/* start the watchdog. */ -	hw->mac.get_link_status = 1; +	hw->mac.get_link_status = true;  	schedule_work(&adapter->watchdog_task);  	return IGC_SUCCESS; @@ -4844,6 +5178,58 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,  	}  } +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ +	struct igc_adapter *adapter = netdev_priv(dev); + +	switch (bpf->command) { +	case XDP_SETUP_PROG: +		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); +	default: +		return -EOPNOTSUPP; +	} +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, +			struct xdp_frame **frames, u32 flags) +{ +	struct igc_adapter *adapter = netdev_priv(dev); +	int cpu = smp_processor_id(); +	struct netdev_queue *nq; +	struct igc_ring *ring; +	int i, drops; + +	if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) +		return -ENETDOWN; + +	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) +		return -EINVAL; + +	ring = igc_xdp_get_tx_ring(adapter, cpu); +	nq = txring_txq(ring); + +	__netif_tx_lock(nq, cpu); + +	drops = 0; +	for (i = 0; i < num_frames; i++) { +		int err; +		struct xdp_frame *xdpf = frames[i]; + +		err = igc_xdp_init_tx_descriptor(ring, xdpf); +		if (err) { +			xdp_return_frame_rx_napi(xdpf); +			drops++; +		} +	} + +	if (flags & XDP_XMIT_FLUSH) +		igc_flush_tx_descriptors(ring); + +	__netif_tx_unlock(nq); + +	return num_frames - drops; +} +  static const struct net_device_ops igc_netdev_ops = {  	.ndo_open		= igc_open,  	.ndo_stop		= igc_close, @@ -4857,6 +5243,8 @@ static const struct net_device_ops igc_netdev_ops = {  	.ndo_features_check	= igc_features_check,  	.ndo_do_ioctl		= igc_ioctl,  	.ndo_setup_tc		= igc_setup_tc, +	.ndo_bpf		= igc_bpf, +	.ndo_xdp_xmit		= igc_xdp_xmit,  };  /* PCIe configuration access */ @@ -4924,7 +5312,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)  {  	struct igc_mac_info *mac = &adapter->hw.mac; -	mac->autoneg = 0; +	mac->autoneg = false;  	/* Make sure dplx is at most 1 bit and lsb of speed is not set  	 * for the switch() below to work @@ -4946,13 +5334,13 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)  		mac->forced_speed_duplex = ADVERTISE_100_FULL;  		break;  	case SPEED_1000 + DUPLEX_FULL: -		mac->autoneg = 1; +		mac->autoneg = true;  		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;  		break;  	case SPEED_1000 + DUPLEX_HALF: /* not supported */  		goto err_inval;  	case SPEED_2500 + DUPLEX_FULL: -		mac->autoneg = 1; +		mac->autoneg = true;  		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;  		break;  	case SPEED_2500 + DUPLEX_HALF: /* not supported */ diff --git 
a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 545f4d0e67cf..69617d2c1be2 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -120,12 +120,289 @@ static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,  	return 0;  } +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ +	u32 *ptr = pin < 2 ? ctrl : ctrl_ext; +	static const u32 mask[IGC_N_SDP] = { +		IGC_CTRL_SDP0_DIR, +		IGC_CTRL_SDP1_DIR, +		IGC_CTRL_EXT_SDP2_DIR, +		IGC_CTRL_EXT_SDP3_DIR, +	}; + +	if (input) +		*ptr &= ~mask[pin]; +	else +		*ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ +	static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { +		IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, +	}; +	static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { +		IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, +	}; +	static const u32 igc_ts_sdp_en[IGC_N_SDP] = { +		IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, +	}; +	static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { +		IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, +		IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, +	}; +	static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { +		IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, +		IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, +	}; +	static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { +		IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, +		IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, +	}; +	static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { +		IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, +		IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, +	}; +	static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { +		IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, +		IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, +	}; +	struct igc_hw *hw = &igc->hw; +	u32 ctrl, ctrl_ext, tssdp = 0; + +	ctrl = rd32(IGC_CTRL); +	ctrl_ext = rd32(IGC_CTRL_EXT); +	tssdp = rd32(IGC_TSSDP); + +	igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + +	/* Make sure this pin is not enabled as an input. */ +	if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) +		tssdp &= ~IGC_AUX0_TS_SDP_EN; + +	if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) +		tssdp &= ~IGC_AUX1_TS_SDP_EN; + +	tssdp &= ~igc_ts_sdp_sel_clr[pin]; +	if (freq) { +		if (chan == 1) +			tssdp |= igc_ts_sdp_sel_fc1[pin]; +		else +			tssdp |= igc_ts_sdp_sel_fc0[pin]; +	} else { +		if (chan == 1) +			tssdp |= igc_ts_sdp_sel_tt1[pin]; +		else +			tssdp |= igc_ts_sdp_sel_tt0[pin]; +	} +	tssdp |= igc_ts_sdp_en[pin]; + +	wr32(IGC_TSSDP, tssdp); +	wr32(IGC_CTRL, ctrl); +	wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ +	static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { +		IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, +	}; +	static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { +		IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, +	}; +	static const u32 igc_ts_sdp_en[IGC_N_SDP] = { +		IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, +	}; +	struct igc_hw *hw = &igc->hw; +	u32 ctrl, ctrl_ext, tssdp = 0; + +	ctrl = rd32(IGC_CTRL); +	ctrl_ext = rd32(IGC_CTRL_EXT); +	tssdp = rd32(IGC_TSSDP); + +	igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + +	/* Make sure this pin is not enabled as an output. 
*/ +	tssdp &= ~igc_ts_sdp_en[pin]; + +	if (chan == 1) { +		tssdp &= ~IGC_AUX1_SEL_SDP3; +		tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; +	} else { +		tssdp &= ~IGC_AUX0_SEL_SDP3; +		tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; +	} + +	wr32(IGC_TSSDP, tssdp); +	wr32(IGC_CTRL, ctrl); +	wr32(IGC_CTRL_EXT, ctrl_ext); +} +  static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,  				       struct ptp_clock_request *rq, int on)  { +	struct igc_adapter *igc = +		container_of(ptp, struct igc_adapter, ptp_caps); +	struct igc_hw *hw = &igc->hw; +	unsigned long flags; +	struct timespec64 ts; +	int use_freq = 0, pin = -1; +	u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; +	s64 ns; + +	switch (rq->type) { +	case PTP_CLK_REQ_EXTTS: +		/* Reject requests with unsupported flags */ +		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | +					PTP_RISING_EDGE | +					PTP_FALLING_EDGE | +					PTP_STRICT_FLAGS)) +			return -EOPNOTSUPP; + +		/* Reject requests failing to enable both edges. */ +		if ((rq->extts.flags & PTP_STRICT_FLAGS) && +		    (rq->extts.flags & PTP_ENABLE_FEATURE) && +		    (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) +			return -EOPNOTSUPP; + +		if (on) { +			pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, +					   rq->extts.index); +			if (pin < 0) +				return -EBUSY; +		} +		if (rq->extts.index == 1) { +			tsauxc_mask = IGC_TSAUXC_EN_TS1; +			tsim_mask = IGC_TSICR_AUTT1; +		} else { +			tsauxc_mask = IGC_TSAUXC_EN_TS0; +			tsim_mask = IGC_TSICR_AUTT0; +		} +		spin_lock_irqsave(&igc->tmreg_lock, flags); +		tsauxc = rd32(IGC_TSAUXC); +		tsim = rd32(IGC_TSIM); +		if (on) { +			igc_pin_extts(igc, rq->extts.index, pin); +			tsauxc |= tsauxc_mask; +			tsim |= tsim_mask; +		} else { +			tsauxc &= ~tsauxc_mask; +			tsim &= ~tsim_mask; +		} +		wr32(IGC_TSAUXC, tsauxc); +		wr32(IGC_TSIM, tsim); +		spin_unlock_irqrestore(&igc->tmreg_lock, flags); +		return 0; + +	case PTP_CLK_REQ_PEROUT: +		/* Reject requests with unsupported flags */ +		if (rq->perout.flags) +			return -EOPNOTSUPP; + +		if (on) { +			pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, +					   rq->perout.index); +			if (pin < 0) +				return -EBUSY; +		} +		ts.tv_sec = rq->perout.period.sec; +		ts.tv_nsec = rq->perout.period.nsec; +		ns = timespec64_to_ns(&ts); +		ns = ns >> 1; +		if (on && (ns <= 70000000LL || ns == 125000000LL || +			   ns == 250000000LL || ns == 500000000LL)) { +			if (ns < 8LL) +				return -EINVAL; +			use_freq = 1; +		} +		ts = ns_to_timespec64(ns); +		if (rq->perout.index == 1) { +			if (use_freq) { +				tsauxc_mask = IGC_TSAUXC_EN_CLK1; +				tsim_mask = 0; +			} else { +				tsauxc_mask = IGC_TSAUXC_EN_TT1; +				tsim_mask = IGC_TSICR_TT1; +			} +			trgttiml = IGC_TRGTTIML1; +			trgttimh = IGC_TRGTTIMH1; +			freqout = IGC_FREQOUT1; +		} else { +			if (use_freq) { +				tsauxc_mask = IGC_TSAUXC_EN_CLK0; +				tsim_mask = 0; +			} else { +				tsauxc_mask = IGC_TSAUXC_EN_TT0; +				tsim_mask = IGC_TSICR_TT0; +			} +			trgttiml = IGC_TRGTTIML0; +			trgttimh = IGC_TRGTTIMH0; +			freqout = IGC_FREQOUT0; +		} +		spin_lock_irqsave(&igc->tmreg_lock, flags); +		tsauxc = rd32(IGC_TSAUXC); +		tsim = rd32(IGC_TSIM); +		if (rq->perout.index == 1) { +			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); +			tsim &= ~IGC_TSICR_TT1; +		} else { +			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); +			tsim &= ~IGC_TSICR_TT0; +		} +		if (on) { +			int i = rq->perout.index; + +			igc_pin_perout(igc, i, pin, use_freq); +			igc->perout[i].start.tv_sec = rq->perout.start.sec; +	
		igc->perout[i].start.tv_nsec = rq->perout.start.nsec; +			igc->perout[i].period.tv_sec = ts.tv_sec; +			igc->perout[i].period.tv_nsec = ts.tv_nsec; +			wr32(trgttimh, rq->perout.start.sec); +			/* For now, always select timer 0 as source. */ +			wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); +			if (use_freq) +				wr32(freqout, ns); +			tsauxc |= tsauxc_mask; +			tsim |= tsim_mask; +		} +		wr32(IGC_TSAUXC, tsauxc); +		wr32(IGC_TSIM, tsim); +		spin_unlock_irqrestore(&igc->tmreg_lock, flags); +		return 0; + +	case PTP_CLK_REQ_PPS: +		spin_lock_irqsave(&igc->tmreg_lock, flags); +		tsim = rd32(IGC_TSIM); +		if (on) +			tsim |= IGC_TSICR_SYS_WRAP; +		else +			tsim &= ~IGC_TSICR_SYS_WRAP; +		igc->pps_sys_wrap_on = on; +		wr32(IGC_TSIM, tsim); +		spin_unlock_irqrestore(&igc->tmreg_lock, flags); +		return 0; + +	default: +		break; +	} +  	return -EOPNOTSUPP;  } +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, +			      enum ptp_pin_function func, unsigned int chan) +{ +	switch (func) { +	case PTP_PF_NONE: +	case PTP_PF_EXTTS: +	case PTP_PF_PEROUT: +		break; +	case PTP_PF_PHYSYNC: +		return -1; +	} +	return 0; +} +  /**   * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp   * @adapter: board private structure @@ -153,20 +430,20 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,  /**   * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer - * @q_vector: Pointer to interrupt specific structure - * @va: Pointer to address containing Rx buffer - * @skb: Buffer containing timestamp and packet + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer   *   * This function retrieves the timestamp saved in the beginning of packet   * buffer. While two timestamps are available, one in timer0 reference and the   * other in timer1 reference, this function considers only the timestamp in   * timer0 reference. + * + * Returns timestamp value.   */ -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, -			 struct sk_buff *skb) +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)  { -	struct igc_adapter *adapter = q_vector->adapter; -	u64 regval; +	ktime_t timestamp; +	u32 secs, nsecs;  	int adjust;  	/* Timestamps are saved in little endian at the beginning of the packet @@ -178,9 +455,10 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,  	 * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds  	 * part of the timestamp.  	 
*/ -	regval = le32_to_cpu(va[2]); -	regval |= (u64)le32_to_cpu(va[3]) << 32; -	igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +	nsecs = le32_to_cpu(buf[2]); +	secs = le32_to_cpu(buf[3]); + +	timestamp = ktime_set(secs, nsecs);  	/* Adjust timestamp for the RX latency based on link speed */  	switch (adapter->link_speed) { @@ -201,8 +479,8 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,  		netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");  		break;  	} -	skb_hwtstamps(skb)->hwtstamp = -		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + +	return ktime_sub_ns(timestamp, adjust);  }  static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) @@ -485,9 +763,17 @@ void igc_ptp_init(struct igc_adapter *adapter)  {  	struct net_device *netdev = adapter->netdev;  	struct igc_hw *hw = &adapter->hw; +	int i;  	switch (hw->mac.type) {  	case igc_i225: +		for (i = 0; i < IGC_N_SDP; i++) { +			struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + +			snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); +			ppd->index = i; +			ppd->func = PTP_PF_NONE; +		}  		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);  		adapter->ptp_caps.owner = THIS_MODULE;  		adapter->ptp_caps.max_adj = 62499999; @@ -496,6 +782,12 @@ void igc_ptp_init(struct igc_adapter *adapter)  		adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;  		adapter->ptp_caps.settime64 = igc_ptp_settime_i225;  		adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; +		adapter->ptp_caps.pps = 1; +		adapter->ptp_caps.pin_config = adapter->sdp_config; +		adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; +		adapter->ptp_caps.n_per_out = IGC_N_PEROUT; +		adapter->ptp_caps.n_pins = IGC_N_SDP; +		adapter->ptp_caps.verify = igc_ptp_verify_pin;  		break;  	default:  		adapter->ptp_clock = NULL; @@ -597,7 +889,9 @@ void igc_ptp_reset(struct igc_adapter *adapter)  	case igc_i225:  		wr32(IGC_TSAUXC, 0x0);  		wr32(IGC_TSSDP, 0x0); -		wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS); +		wr32(IGC_TSIM, +		     IGC_TSICR_INTERRUPTS | +		     (adapter->pps_sys_wrap_on ? 
IGC_TSICR_SYS_WRAP : 0));  		wr32(IGC_IMS, IGC_IMS_TS);  		break;  	default: diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 3e5cb7aef9da..cc174853554b 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -192,6 +192,16 @@  #define IGC_TSYNCTXCTL	0x0B614  /* Tx Time Sync Control register - RW */  #define IGC_TSYNCRXCFG	0x05F50  /* Time Sync Rx Configuration - RW */  #define IGC_TSSDP	0x0003C  /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0	0x0B644 /* Target Time Register 0 Low  - RW */ +#define IGC_TRGTTIMH0	0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1	0x0B64C /* Target Time Register 1 Low  - RW */ +#define IGC_TRGTTIMH1	0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0	0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1	0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0	0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */ +#define IGC_AUXSTMPH0	0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1	0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */ +#define IGC_AUXSTMPH1	0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */  #define IGC_IMIR(_i)	(0x05A80 + ((_i) * 4))  /* Immediate Interrupt */  #define IGC_IMIREXT(_i)	(0x05AA0 + ((_i) * 4))  /* Immediate INTR Ext*/ diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c new file mode 100644 index 000000000000..11133c4619bb --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, +		     struct netlink_ext_ack *extack) +{ +	struct net_device *dev = adapter->netdev; +	bool if_running = netif_running(dev); +	struct bpf_prog *old_prog; + +	if (dev->mtu > ETH_DATA_LEN) { +		/* For now, the driver doesn't support XDP functionality with +		 * jumbo frames so we return error. +		 */ +		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); +		return -EOPNOTSUPP; +	} + +	if (if_running) +		igc_close(dev); + +	old_prog = xchg(&adapter->xdp_prog, prog); +	if (old_prog) +		bpf_prog_put(old_prog); + +	if (if_running) +		igc_open(dev); + +	return 0; +} + +int igc_xdp_register_rxq_info(struct igc_ring *ring) +{ +	struct net_device *dev = ring->netdev; +	int err; + +	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0); +	if (err) { +		netdev_err(dev, "Failed to register xdp rxq info\n"); +		return err; +	} + +	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, +					 NULL); +	if (err) { +		netdev_err(dev, "Failed to register xdp rxq mem model\n"); +		xdp_rxq_info_unreg(&ring->xdp_rxq); +		return err; +	} + +	return 0; +} + +void igc_xdp_unregister_rxq_info(struct igc_ring *ring) +{ +	xdp_rxq_info_unreg(&ring->xdp_rxq); +} diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h new file mode 100644 index 000000000000..cfecb515b718 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_xdp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, +		     struct netlink_ext_ack *extack); + +int igc_xdp_register_rxq_info(struct igc_ring *ring); +void igc_xdp_unregister_rxq_info(struct igc_ring *ring); + +#endif /* _IGC_XDP_H_ */
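On the PTP side, the SDP pins are surfaced through the standard PTP pin-configuration and ancillary-feature ioctls that igc_ptp_verify_pin() and igc_ptp_feature_enable_i225() now service. A minimal userspace sketch, assuming the igc clock is exposed as /dev/ptp0 and with error handling kept short (illustrative only, not part of this patch), could drive SDP0 as a 1 Hz periodic output like this:

/* perout_sdp0.c - illustrative sketch: route SDP0 to periodic-output
 * channel 0 and request a 1 Hz signal through the generic PTP chardev.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT, .chan = 0 };
	struct ptp_perout_request req = { .index = 0 };
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed to be the igc PHC */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Assign SDP0 to periodic-output channel 0. */
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		perror("PTP_PIN_SETFUNC");

	/* Request a 1 Hz output starting at time 0 on channel 0. */
	req.period.sec = 1;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");

	return 0;
}

The in-tree tools/testing/selftests/ptp/testptp utility goes through the same ioctls (roughly testptp -L 0,2 to set the pin function, then testptp -p 1000000000 for a 1 s period).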