Diffstat (limited to 'drivers/net/ethernet/intel/igc')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h          |   86
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c     |   10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h     |    2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h  |  100
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_dump.c     |    2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  |   90
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c     | 1233
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c      |    6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c      |  182
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h     |   45
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c      |  176
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h      |    1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.c      |  109
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.h      |    8
14 files changed, 1767 insertions(+), 283 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 25871351730b..3e386c38d016 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -33,6 +33,8 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_N_PEROUT 2
#define IGC_N_SDP 4
+#define MAX_FLEX_FILTER 32
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -96,6 +98,13 @@ struct igc_ring {
u32 start_time;
u32 end_time;
+ /* CBS parameters */
+ bool cbs_enable; /* indicates if CBS is enabled */
+ s32 idleslope; /* idleSlope in kbps */
+ s32 sendslope; /* sendSlope in kbps */
+ s32 hicredit; /* hiCredit in bytes */
+ s32 locredit; /* loCredit in bytes */
+
/* everything past this point are written often */
u16 next_to_clean;
u16 next_to_use;
@@ -118,6 +127,7 @@ struct igc_ring {
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
/* Board specific private data structure */
@@ -224,6 +234,7 @@ struct igc_adapter {
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
+ struct system_time_snapshot snapshot;
char fw_version[32];
@@ -255,6 +266,11 @@ bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
+void igc_disable_rx_ring(struct igc_ring *ring);
+void igc_enable_rx_ring(struct igc_ring *ring);
+void igc_disable_tx_ring(struct igc_ring *ring);
+void igc_enable_tx_ring(struct igc_ring *ring);
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
@@ -281,6 +297,10 @@ extern char igc_driver_name[];
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
+#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
+
+#define IGC_FLAG_TSN_ANY_ENABLED \
+ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -366,6 +386,7 @@ extern char igc_driver_name[];
/* VLAN info */
#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGC_TX_FLAGS_VLAN_SHIFT 16
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
@@ -390,8 +411,6 @@ enum igc_tx_flags {
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
-
- IGC_TX_FLAGS_XDP = 0x100,
};
enum igc_boards {
@@ -408,12 +427,19 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
union {
struct sk_buff *skb;
struct xdp_frame *xdpf;
@@ -428,14 +454,19 @@ struct igc_tx_buffer {
};
struct igc_rx_buffer {
- dma_addr_t dma;
- struct page *page;
+ union {
+ struct {
+ dma_addr_t dma;
+ struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
+ __u32 page_offset;
#else
- __u16 page_offset;
+ __u16 page_offset;
#endif
- __u16 pagecnt_bias;
+ __u16 pagecnt_bias;
+ };
+ struct xdp_buff *xdp;
+ };
};
struct igc_q_vector {
@@ -459,18 +490,28 @@ struct igc_q_vector {
};
enum igc_filter_match_flags {
- IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
- IGC_FILTER_FLAG_VLAN_TCI = 0x2,
- IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
- IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+ IGC_FILTER_FLAG_ETHER_TYPE = BIT(0),
+ IGC_FILTER_FLAG_VLAN_TCI = BIT(1),
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2),
+ IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
+ IGC_FILTER_FLAG_USER_DATA = BIT(4),
+ IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
};
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
+ __be16 vlan_etype;
u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
+ u8 user_data[8];
+ u8 user_mask[8];
+ u8 flex_index;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
};
struct igc_nfc_rule {
@@ -478,12 +519,24 @@ struct igc_nfc_rule {
struct igc_nfc_filter filter;
u32 location;
u16 action;
+ bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority
- * based, and 8 ethertype based.
+/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
+ * based, 8 ethertype based and 32 Flex filter based rules.
*/
-#define IGC_MAX_RXNFC_RULES 32
+#define IGC_MAX_RXNFC_RULES 64
+
+struct igc_flex_filter {
+ u8 index;
+ u8 data[128];
+ u8 mask[16];
+ u8 length;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
+};
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
@@ -521,7 +574,8 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_RX_SCTP_CSUM,
IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
IGC_RING_FLAG_TX_CTX_IDX,
- IGC_RING_FLAG_TX_DETECT_HANG
+ IGC_RING_FLAG_TX_DETECT_HANG,
+ IGC_RING_FLAG_AF_XDP_ZC,
};
#define ring_uses_large_buffer(ring) \
@@ -560,7 +614,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);
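
Note (illustration, not part of the patch): the CBS fields added to struct igc_ring
above (cbs_enable, idleslope, sendslope, hicredit, locredit) hold the 802.1Qav
credit-based shaper parameters handed down by the tc "cbs" qdisc offload; the new
IGC_TQAVCC_* and IGC_TXQCTL_QAV_SEL_* defines in igc_defines.h suggest they are
programmed into hardware by the igc_tsn.c changes listed in the diffstat. A minimal
sketch of the conventional relationship between the values, assuming the usual
tc-cbs formulas; the helper name and the port_rate_kbps parameter are hypothetical
and do not exist in the driver:

	/* sendSlope is the credit drain rate while the queue transmits:
	 * idleSlope minus the port rate, hence negative whenever idleSlope
	 * is below line rate.  hiCredit/loCredit bound the accumulated
	 * credit in bytes.
	 */
	static s32 cbs_sendslope_kbps(s32 idleslope_kbps, s32 port_rate_kbps)
	{
		return idleslope_kbps - port_rate_kbps;
	}
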
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index d0700d48ecf9..84f142f5e472 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I225_I_PHY_ID:
- phy->type = igc_phy_i225;
- break;
- default:
- ret_val = -IGC_ERR_PHY;
- goto out;
- }
+ phy->type = igc_phy_i225;
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ea627ce52525..ce530f5fd7bd 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -78,9 +78,11 @@ union igc_adv_rx_desc {
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
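
Note (illustration, not part of the patch): the new SWFLUSH bits pair with the
igc_disable_rx_ring()/igc_disable_tx_ring() helpers declared in igc.h above, which
the AF_XDP zero-copy code uses to quiesce one queue while its buffer pool is
swapped. A sketch of how a disable path typically uses these bits, assuming the
driver's rd32()/wr32() accessors; the helper name is hypothetical and the real
implementation lives in igc_main.c hunks not shown in this excerpt:

	/* Stop fetching descriptors on one Tx queue and flush what is pending. */
	static void example_disable_tx_queue(struct igc_hw *hw, u8 reg_idx)
	{
		u32 txdctl = rd32(IGC_TXDCTL(reg_idx));

		txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
		txdctl |= IGC_TXDCTL_SWFLUSH;
		wr32(IGC_TXDCTL(reg_idx), txdctl);
	}
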
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 0103dda32f39..a4bbee748798 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -17,11 +17,22 @@
#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
/* Wake Up Filter Control */
-#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */
+#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */
+#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */
+#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */
+#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */
+#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */
+#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */
+#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */
+#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */
+
+#define IGC_WUFC_FILTER_MASK GENMASK(23, 14)
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -46,6 +57,37 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
+/* Wakeup Filter Control Extended */
+#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */
+#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */
+#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */
+#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */
+#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */
+#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */
+#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */
+#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */
+#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */
+#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */
+#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */
+#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */
+#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */
+#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */
+#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */
+#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */
+#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */
+#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */
+#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */
+#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */
+#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */
+#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */
+#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */
+#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */
+
+#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
+
+/* Physical Func Reset Done Indication */
+#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -94,12 +136,13 @@
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
-#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -128,7 +171,6 @@
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
@@ -323,6 +365,9 @@
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+
+#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
@@ -473,11 +518,50 @@
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
+#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0
+#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080
+#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0
+
+#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF
+#define IGC_TQAVCC_KEEP_CREDITS BIT(30)
+
+#define IGC_MAX_SR_QUEUES 2
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+/* PCIe PTM Control */
+#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */
+#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */
+#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */
+#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2)
+#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
+
+#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */
+#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
+#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
+
+/* PCIe Digital Delay */
+#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000
+
+/* PCIe PHY Delay */
+#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000
+
+#define IGC_TIMADJ_ADJUST_METH 0x40000000
+
+/* PCIe PTM Status */
+#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */
+#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */
+#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */
+#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
+#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
+#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
+
+/* PCIe PTM Cycle Control */
+#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */
+#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */
+
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
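
Note (illustration, not part of the patch): the PTM defines above are meant to be
composed into the PTM control registers when PCIe PTM-based cross-timestamping is
enabled from igc_ptp.c (changed in this series per the diffstat, but not shown
here). A sketch of how the defaults plug into the field macros; the IGC_PTM_CTRL
and IGC_PTM_CYCLE_CTRL register names are assumed to come from the igc_regs.h part
of the series, and the exact sequence in igc_ptp.c may differ:

	/* Roughly: auto-cycle every IGC_PTM_CYC_TIME_DEFAULT ms with the default
	 * short-cycle interval and timeout, then trigger the first PTM cycle.
	 */
	static void example_ptm_start(struct igc_hw *hw)
	{
		wr32(IGC_PTM_CYCLE_CTRL,
		     IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT) |
		     IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN);

		wr32(IGC_PTM_CTRL,
		     IGC_PTM_CTRL_EN |
		     IGC_PTM_CTRL_START_NOW |
		     IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
		     IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
		     IGC_PTM_CTRL_TRIG);
	}
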
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 495bed47ed0a..c09c95cc5f70 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -112,7 +112,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 { __le64 a; __le64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 9722449d7633..e0a76ac1bbbc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -554,7 +554,7 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
@@ -765,35 +765,22 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
IGC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
}
/* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
break;
@@ -875,7 +862,9 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
}
static int igc_ethtool_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -895,7 +884,9 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
}
static int igc_ethtool_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
@@ -992,6 +983,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) {
+ fsp->flow_type |= FLOW_EXT;
+ memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data));
+ }
+
mutex_unlock(&adapter->nfc_rule_lock);
return 0;
@@ -1228,6 +1225,30 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
ether_addr_copy(rule->filter.dst_addr,
fsp->h_u.ether_spec.h_dest);
}
+
+ /* VLAN etype matching */
+ if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
+ rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
+ }
+
+ /* Check for user defined data */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ (fsp->h_ext.data[0] || fsp->h_ext.data[1])) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA;
+ memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data));
+ memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
+ }
+
+ /* When multiple filter options or user data or vlan etype is set, use a
+ * flex filter.
+ */
+ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
+ (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ (rule->filter.match_flags & (rule->filter.match_flags - 1)))
+ rule->flex = true;
+ else
+ rule->flex = false;
}
/**
@@ -1257,11 +1278,6 @@ static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
- if (flags & (flags - 1)) {
- netdev_dbg(dev, "Rule with multiple matches not supported\n");
- return -EOPNOTSUPP;
- }
-
list_for_each_entry(tmp, &adapter->nfc_rule_list, list) {
if (!memcmp(&rule->filter, &tmp->filter,
sizeof(rule->filter)) &&
@@ -1293,12 +1309,6 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EOPNOTSUPP;
}
- if ((fsp->flow_type & FLOW_EXT) &&
- fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
- netdev_dbg(netdev, "VLAN mask not supported\n");
- return -EOPNOTSUPP;
- }
-
if (fsp->ring_cookie >= adapter->num_rx_queues) {
netdev_dbg(netdev, "Invalid action\n");
return -EINVAL;
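
Note (illustration, not part of the patch): the new rule classification in
igc_ethtool_init_nfc_rule() above falls back to a flex filter whenever user data,
a VLAN ethertype, or more than one match criterion is requested. The multi-flag
check uses the classic x & (x - 1) test; a small sketch with illustrative names
(not driver code):

	/* x & (x - 1) clears the lowest set bit, so the result is non-zero
	 * exactly when more than one flag is set, e.g.:
	 *   ETHER_TYPE only        -> 0x01 & 0x00 == 0  (regular filter)
	 *   ETHER_TYPE | VLAN_TCI  -> 0x03 & 0x02 != 0  (flex filter)
	 */
	static bool igc_example_multiple_flags(u8 match_flags)
	{
		return match_flags & (match_flags - 1);
	}
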
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f1adf154ec4a..b877efae61df 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <linux/pci.h>
#include <net/ipv6.h>
@@ -111,11 +113,14 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_phy_copper_base(&adapter->hw);
+ /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
+ wr32(IGC_VET, ETH_P_8021Q);
+
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
/* Re-enable TSN offloading, where applicable. */
- igc_tsn_offload_apply(adapter);
+ igc_tsn_reset(adapter);
igc_get_phy_info(hw);
}
@@ -146,6 +151,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl_ext;
+ if (!pci_device_is_present(adapter->pdev))
+ return;
+
/* Let firmware take over control of h/w */
ctrl_ext = rd32(IGC_CTRL_EXT);
wr32(IGC_CTRL_EXT,
@@ -171,6 +179,14 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
+static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
+{
+ dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len), DMA_TO_DEVICE);
+
+ dma_unmap_len_set(buf, len, 0);
+}
+
/**
* igc_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
@@ -179,20 +195,27 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union igc_adv_tx_desc *eop_desc, *tx_desc;
- if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ switch (tx_buffer->type) {
+ case IGC_TX_BUFFER_TYPE_XSK:
+ xsk_frames++;
+ break;
+ case IGC_TX_BUFFER_TYPE_XDP:
xdp_return_frame(tx_buffer->xdpf);
- else
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ case IGC_TX_BUFFER_TYPE_SKB:
dev_kfree_skb_any(tx_buffer->skb);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ default:
+ netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+ break;
+ }
/* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch;
@@ -211,12 +234,11 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -226,6 +248,9 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
}
}
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
@@ -346,11 +371,7 @@ static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
return err;
}
-/**
- * igc_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- */
-static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
@@ -383,12 +404,39 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
if (i == rx_ring->count)
i = 0;
}
+}
- clear_ring_uses_large_buffer(rx_ring);
+static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
+{
+ struct igc_rx_buffer *bi;
+ u16 i;
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ for (i = 0; i < ring->count; i++) {
+ bi = &ring->rx_buffer_info[i];
+ if (!bi->xdp)
+ continue;
+
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+ }
+}
+
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *ring)
+{
+ if (ring->xsk_pool)
+ igc_clean_rx_ring_xsk_pool(ring);
+ else
+ igc_clean_rx_ring_page_shared(ring);
+
+ clear_ring_uses_large_buffer(ring);
+
+ ring->next_to_alloc = 0;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
}
/**
@@ -414,7 +462,7 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
- igc_xdp_unregister_rxq_info(rx_ring);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -453,11 +501,16 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
+ u8 index = rx_ring->queue_index;
int size, desc_len, res;
- res = igc_xdp_register_rxq_info(rx_ring);
- if (res < 0)
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
+ rx_ring->q_vector->napi.napi_id);
+ if (res < 0) {
+ netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
+ index);
return res;
+ }
size = sizeof(struct igc_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
@@ -483,7 +536,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
return 0;
err:
- igc_xdp_unregister_rxq_info(rx_ring);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -515,9 +568,14 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
return err;
}
-static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
+ struct igc_ring *ring)
{
- return !!adapter->xdp_prog;
+ if (!igc_xdp_is_enabled(adapter) ||
+ !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
+ return NULL;
+
+ return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}
/**
@@ -535,6 +593,20 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
u64 rdba = ring->dma;
+ u32 buf_size;
+
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
if (igc_xdp_is_enabled(adapter))
set_ring_uses_large_buffer(ring);
@@ -558,12 +630,15 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
ring->next_to_clean = 0;
ring->next_to_use = 0;
- /* set descriptor configuration */
- srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGC_RXBUFFER_3072;
else
- srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGC_RXBUFFER_2048;
+
+ srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
wr32(IGC_SRRCTL(reg_idx), srrctl);
@@ -618,6 +693,8 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
u64 tdba = ring->dma;
u32 txdctl = 0;
+ ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+
/* disable the queue */
wr32(IGC_TXDCTL(reg_idx), 0);
wrfl();
@@ -1055,13 +1132,17 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result))))
-static u32 igc_tx_cmd_type(u32 tx_flags)
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set HW vlan bit if vlan is present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
+ IGC_ADVTXD_DCMD_VLE);
+
/* set segmentation bits for TSO */
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
(IGC_ADVTXD_DCMD_TSE));
@@ -1070,6 +1151,9 @@ static u32 igc_tx_cmd_type(u32 tx_flags)
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
(IGC_ADVTXD_MAC_TSTAMP));
+ /* insert frame checksum */
+ cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
+
return cmd_type;
}
@@ -1104,8 +1188,9 @@ static int igc_tx_map(struct igc_ring *tx_ring,
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
- u32 cmd_type = igc_tx_cmd_type(tx_flags);
+ u32 cmd_type;
+ cmd_type = igc_tx_cmd_type(skb, tx_flags);
tx_desc = IGC_TX_DESC(tx_ring, i);
igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
@@ -1211,11 +1296,7 @@ dma_error:
/* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) {
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
if (i-- == 0)
i += tx_ring->count;
@@ -1223,11 +1304,7 @@ dma_error:
}
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL;
@@ -1359,6 +1436,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGC_TX_BUFFER_TYPE_SKB;
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
@@ -1383,6 +1461,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
}
}
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= IGC_TX_FLAGS_VLAN;
+ tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1482,6 +1565,25 @@ static inline void igc_rx_hash(struct igc_ring *ring,
PKT_HASH_TYPE_L3);
}
+static void igc_rx_vlan(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+ u16 vid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
+ if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
+ test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
/**
* igc_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
@@ -1500,11 +1602,37 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ igc_rx_vlan(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
+{
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+
+ if (enable) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IGC_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~IGC_CTRL_VME;
+ }
+ wr32(IGC_CTRL, ctrl);
+}
+
+static void igc_restore_vlan(struct igc_adapter *adapter)
+{
+ igc_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
const unsigned int size,
int *rx_buffer_pgcnt)
@@ -1930,6 +2058,63 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
}
}
+static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
+{
+ union igc_adv_rx_desc *desc;
+ u16 i = ring->next_to_use;
+ struct igc_rx_buffer *bi;
+ dma_addr_t dma;
+ bool ok = true;
+
+ if (!count)
+ return ok;
+
+ desc = IGC_RX_DESC(ring, i);
+ bi = &ring->rx_buffer_info[i];
+ i -= ring->count;
+
+ do {
+ bi->xdp = xsk_buff_alloc(ring->xsk_pool);
+ if (!bi->xdp) {
+ ok = false;
+ break;
+ }
+
+ dma = xsk_buff_xdp_get_dma(bi->xdp);
+ desc->read.pkt_addr = cpu_to_le64(dma);
+
+ desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ desc = IGC_RX_DESC(ring, 0);
+ bi = ring->rx_buffer_info;
+ i -= ring->count;
+ }
+
+ /* Clear the length for the next_to_use descriptor. */
+ desc->wb.upper.length = 0;
+
+ count--;
+ } while (count);
+
+ i += ring->count;
+
+ if (ring->next_to_use != i) {
+ ring->next_to_use = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, ring->tail);
+ }
+
+ return ok;
+}
+
static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
struct xdp_frame *xdpf,
struct igc_ring *ring)
@@ -1942,8 +2127,8 @@ static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_XDP;
buffer->xdpf = xdpf;
- buffer->tx_flags = IGC_TX_FLAGS_XDP;
buffer->protocol = 0;
buffer->bytecount = xdpf->len;
buffer->gso_segs = 1;
@@ -2025,35 +2210,24 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
return res;
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+/* This function assumes rcu_read_lock() is held by the caller. */
+static int __igc_xdp_run_prog(struct igc_adapter *adapter,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
{
- struct bpf_prog *prog;
- int res;
- u32 act;
-
- rcu_read_lock();
+ u32 act = bpf_prog_run_xdp(prog, xdp);
- prog = READ_ONCE(adapter->xdp_prog);
- if (!prog) {
- res = IGC_XDP_PASS;
- goto unlock;
- }
-
- act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- res = IGC_XDP_PASS;
- break;
+ return IGC_XDP_PASS;
case XDP_TX:
if (igc_xdp_xmit_back(adapter, xdp) < 0)
goto out_failure;
- res = IGC_XDP_TX;
- break;
+ return IGC_XDP_TX;
case XDP_REDIRECT:
if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
goto out_failure;
- res = IGC_XDP_REDIRECT;
+ return IGC_XDP_REDIRECT;
-               break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2063,12 +2237,25 @@ out_failure:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
- res = IGC_XDP_CONSUMED;
- break;
+ return IGC_XDP_CONSUMED;
}
+}
-unlock:
- rcu_read_unlock();
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ prog = READ_ONCE(adapter->xdp_prog);
+ if (!prog) {
+ res = IGC_XDP_PASS;
+ goto out;
+ }
+
+ res = __igc_xdp_run_prog(adapter, prog, xdp);
+
+out:
return ERR_PTR(-res);
}
@@ -2102,6 +2289,20 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
xdp_do_flush();
}
+static void igc_update_rx_stats(struct igc_q_vector *q_vector,
+ unsigned int packets, unsigned int bytes)
+{
+ struct igc_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -2150,12 +2351,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
if (!skb) {
- xdp.data = pktbuf + pkt_offset;
- xdp.data_end = xdp.data + size;
- xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
- xdp_set_data_meta_invalid(&xdp);
- xdp.frame_sz = truesize;
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+ igc_rx_offset(rx_ring) + pkt_offset, size, false);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2225,12 +2423,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igc_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igc_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -2238,6 +2431,221 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
return total_packets;
}
+static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int totalsize = metasize + datasize;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ return skb;
+}
+
+static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
+ union igc_adv_rx_desc *desc,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ struct igc_ring *ring = q_vector->rx.ring;
+ struct sk_buff *skb;
+
+ skb = igc_construct_skb_zc(ring, xdp);
+ if (!skb) {
+ ring->rx_stats.alloc_failed++;
+ return;
+ }
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ if (igc_cleanup_headers(ring, desc, skb))
+ return;
+
+ igc_process_skb_fields(ring, desc, skb);
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_ring *ring = q_vector->rx.ring;
+ u16 cleaned_count = igc_desc_unused(ring);
+ int total_bytes = 0, total_packets = 0;
+ u16 ntc = ring->next_to_clean;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(adapter->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union igc_adv_rx_desc *desc;
+ struct igc_rx_buffer *bi;
+ ktime_t timestamp = 0;
+ unsigned int size;
+ int res;
+
+ desc = IGC_RX_DESC(ring, ntc);
+ size = le16_to_cpu(desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ bi = &ring->rx_buffer_info[ntc];
+
+ if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
+ timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
+ bi->xdp->data);
+
+ bi->xdp->data += IGC_TS_HDR_LEN;
+
+ /* HW timestamp has been copied into local variable. Metadata
+ * length when XDP program is called should be 0.
+ */
+ bi->xdp->data_meta += IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
+ bi->xdp->data_end = bi->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+
+ res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
+ switch (res) {
+ case IGC_XDP_PASS:
+ igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ fallthrough;
+ case IGC_XDP_CONSUMED:
+ xsk_buff_free(bi->xdp);
+ break;
+ case IGC_XDP_TX:
+ case IGC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ bi->xdp = NULL;
+ total_bytes += size;
+ total_packets++;
+ cleaned_count++;
+ ntc++;
+ if (ntc == ring->count)
+ ntc = 0;
+ }
+
+ ring->next_to_clean = ntc;
+ rcu_read_unlock();
+
+ if (cleaned_count >= IGC_RX_BUFFER_WRITE)
+ failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
+
+ if (xdp_status)
+ igc_finalize_xdp(adapter, xdp_status);
+
+ igc_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ if (xsk_uses_need_wakeup(ring->xsk_pool)) {
+ if (failure || ring->next_to_clean == ring->next_to_use)
+ xsk_set_rx_need_wakeup(ring->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(ring->xsk_pool);
+ return total_packets;
+ }
+
+ return failure ? budget : total_packets;
+}
+
+static void igc_update_tx_stats(struct igc_q_vector *q_vector,
+ unsigned int packets, unsigned int bytes)
+{
+ struct igc_ring *ring = q_vector->tx.ring;
+
+ u64_stats_update_begin(&ring->tx_syncp);
+ ring->tx_stats.bytes += bytes;
+ ring->tx_stats.packets += packets;
+ u64_stats_update_end(&ring->tx_syncp);
+
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += packets;
+}
+
+static void igc_xdp_xmit_zc(struct igc_ring *ring)
+{
+ struct xsk_buff_pool *pool = ring->xsk_pool;
+ struct netdev_queue *nq = txring_txq(ring);
+ union igc_adv_tx_desc *tx_desc = NULL;
+ int cpu = smp_processor_id();
+ u16 ntu = ring->next_to_use;
+ struct xdp_desc xdp_desc;
+ u16 budget;
+
+ if (!netif_carrier_ok(ring->netdev))
+ return;
+
+ __netif_tx_lock(nq, cpu);
+
+ budget = igc_desc_unused(ring);
+
+ while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ u32 cmd_type, olinfo_status;
+ struct igc_tx_buffer *bi;
+ dma_addr_t dma;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ xdp_desc.len;
+ olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+
+ tx_desc = IGC_TX_DESC(ring, ntu);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ bi = &ring->tx_buffer_info[ntu];
+ bi->type = IGC_TX_BUFFER_TYPE_XSK;
+ bi->protocol = 0;
+ bi->bytecount = xdp_desc.len;
+ bi->gso_segs = 1;
+ bi->time_stamp = jiffies;
+ bi->next_to_watch = tx_desc;
+
+ netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
+
+ ntu++;
+ if (ntu == ring->count)
+ ntu = 0;
+ }
+
+ ring->next_to_use = ntu;
+ if (tx_desc) {
+ igc_flush_tx_descriptors(ring);
+ xsk_tx_release(pool);
+ }
+
+ __netif_tx_unlock(nq);
+}
+
/**
* igc_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -2254,6 +2662,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
unsigned int i = tx_ring->next_to_clean;
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
+ u32 xsk_frames = 0;
if (test_bit(__IGC_DOWN, &adapter->state))
return true;
@@ -2283,19 +2692,22 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ switch (tx_buffer->type) {
+ case IGC_TX_BUFFER_TYPE_XSK:
+ xsk_frames++;
+ break;
+ case IGC_TX_BUFFER_TYPE_XDP:
xdp_return_frame(tx_buffer->xdpf);
- else
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ case IGC_TX_BUFFER_TYPE_SKB:
napi_consume_skb(tx_buffer->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buffer data */
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ default:
+ netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+ break;
+ }
/* clear last DMA location and unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -2309,13 +2721,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
- }
+ if (dma_unmap_len(tx_buffer, len))
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
/* move us one more past the eop_desc for start of next pkt */
@@ -2340,12 +2747,16 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->tx_syncp);
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->tx_syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+
+ igc_update_tx_stats(q_vector, total_packets, total_bytes);
+
+ if (tx_ring->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+ igc_xdp_xmit_zc(tx_ring);
+ }
if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct igc_hw *hw = &adapter->hw;
@@ -2669,11 +3080,320 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
etype);
}
+static int igc_flex_filter_select(struct igc_adapter *adapter,
+ struct igc_flex_filter *input,
+ u32 *fhft)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 fhft_index;
+ u32 fhftsl;
+
+ if (input->index >= MAX_FLEX_FILTER) {
+ dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ return -EINVAL;
+ }
+
+ /* Indirect table select register */
+ fhftsl = rd32(IGC_FHFTSL);
+ fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
+ switch (input->index) {
+ case 0 ... 7:
+ fhftsl |= 0x00;
+ break;
+ case 8 ... 15:
+ fhftsl |= 0x01;
+ break;
+ case 16 ... 23:
+ fhftsl |= 0x02;
+ break;
+ case 24 ... 31:
+ fhftsl |= 0x03;
+ break;
+ }
+ wr32(IGC_FHFTSL, fhftsl);
+
+ /* Normalize index down to host table register */
+ fhft_index = input->index % 8;
+
+ *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
+ IGC_FHFT_EXT(fhft_index - 4);
+
+ return 0;
+}
+
+static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
+ struct igc_flex_filter *input)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct igc_hw *hw = &adapter->hw;
+ u8 *data = input->data;
+ u8 *mask = input->mask;
+ u32 queuing;
+ u32 fhft;
+ u32 wufc;
+ int ret;
+ int i;
+
+ /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
+ * out early to avoid surprises later.
+ */
+ if (input->length % 8 != 0) {
+ dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ /* Select corresponding flex filter register and get base for host table. */
+ ret = igc_flex_filter_select(adapter, input, &fhft);
+ if (ret)
+ return ret;
+
+ /* When adding a filter globally disable flex filter feature. That is
+ * recommended within the datasheet.
+ */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+
+ /* Configure filter */
+ queuing = input->length & IGC_FHFT_LENGTH_MASK;
+ queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
+ queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+
+ if (input->immediate_irq)
+ queuing |= IGC_FHFT_IMM_INT;
+
+ if (input->drop)
+ queuing |= IGC_FHFT_DROP;
+
+ wr32(fhft + 0xFC, queuing);
+
+ /* Write data (128 byte) and mask (128 bit) */
+ for (i = 0; i < 16; ++i) {
+ const size_t data_idx = i * 8;
+ const size_t row_idx = i * 16;
+ u32 dw0 =
+ (data[data_idx + 0] << 0) |
+ (data[data_idx + 1] << 8) |
+ (data[data_idx + 2] << 16) |
+ (data[data_idx + 3] << 24);
+ u32 dw1 =
+ (data[data_idx + 4] << 0) |
+ (data[data_idx + 5] << 8) |
+ (data[data_idx + 6] << 16) |
+ (data[data_idx + 7] << 24);
+ u32 tmp;
+
+ /* Write row: dw0, dw1 and mask */
+ wr32(fhft + row_idx, dw0);
+ wr32(fhft + row_idx + 4, dw1);
+
+ /* mask is only valid for MASK(7, 0) */
+ tmp = rd32(fhft + row_idx + 8);
+ tmp &= ~GENMASK(7, 0);
+ tmp |= mask[i];
+ wr32(fhft + row_idx + 8, tmp);
+ }
+
+ /* Enable filter. */
+ wufc |= IGC_WUFC_FLEX_HQ;
+ if (input->index > 8) {
+ /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
+
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc |= (IGC_WUFC_FLX0 << input->index);
+ }
+ wr32(IGC_WUFC, wufc);
+
+ dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
+ input->index);
+
+ return 0;
+}
+
+static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
+ const void *src, unsigned int offset,
+ size_t len, const void *mask)
+{
+ int i;
+
+ /* data */
+ memcpy(&flex->data[offset], src, len);
+
+ /* mask */
+ for (i = 0; i < len; ++i) {
+ const unsigned int idx = i + offset;
+ const u8 *ptr = mask;
+
+ if (mask) {
+ if (ptr[i] & 0xff)
+ flex->mask[idx / 8] |= BIT(idx % 8);
+
+ continue;
+ }
+
+ flex->mask[idx / 8] |= BIT(idx % 8);
+ }
+}
+
+static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+ int i;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ for (i = 0; i < MAX_FLEX_FILTER; i++) {
+ if (i < 8) {
+ if (!(wufc & (IGC_WUFC_FLX0 << i)))
+ return i;
+ } else {
+ if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ if (wufc & IGC_WUFC_FILTER_MASK)
+ return true;
+
+ if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
+ return true;
+
+ return false;
+}
+
+static int igc_add_flex_filter(struct igc_adapter *adapter,
+ struct igc_nfc_rule *rule)
+{
+ struct igc_flex_filter flex = { };
+ struct igc_nfc_filter *filter = &rule->filter;
+ unsigned int eth_offset, user_offset;
+ int ret, index;
+ bool vlan;
+
+ index = igc_find_avail_flex_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ /* Construct the flex filter:
+ * -> dest_mac [6]
+ * -> src_mac [6]
+ * -> tpid [2]
+ * -> vlan tci [2]
+ * -> ether type [2]
+ * -> user data [8]
+ * -> = 26 bytes => 32 length
+ */
+ flex.index = index;
+ flex.length = 32;
+ flex.rx_queue = rule->action;
+
+ vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
+ eth_offset = vlan ? 16 : 12;
+ user_offset = vlan ? 18 : 14;
+
+ /* Add destination MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
+ ETH_ALEN, NULL);
+
+ /* Add source MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
+ ETH_ALEN, NULL);
+
+ /* Add VLAN etype */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
+ igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
+ sizeof(filter->vlan_etype),
+ NULL);
+
+ /* Add VLAN TCI */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
+ sizeof(filter->vlan_tci), NULL);
+
+ /* Add Ether type */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ __be16 etype = cpu_to_be16(filter->etype);
+
+ igc_flex_filter_add_field(&flex, &etype, eth_offset,
+ sizeof(etype), NULL);
+ }
+
+ /* Add user data */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
+ igc_flex_filter_add_field(&flex, &filter->user_data,
+ user_offset,
+ sizeof(filter->user_data),
+ filter->user_mask);
+
+ /* Add it down to the hardware and enable it. */
+ ret = igc_write_flex_filter_ll(adapter, &flex);
+ if (ret)
+ return ret;
+
+ filter->flex_index = index;
+
+ return 0;
+}
+
+static void igc_del_flex_filter(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc;
+
+ /* Just disable the filter. The filter table itself is kept
+ * intact. Another flex_filter_add() should override the "old" data
+ * then.
+ */
+ if (reg_index > 8) {
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc = rd32(IGC_WUFC);
+
+ wufc &= ~(IGC_WUFC_FLX0 << reg_index);
+ wr32(IGC_WUFC, wufc);
+ }
+
+ if (igc_flex_filter_in_use(adapter))
+ return;
+
+ /* No filters are in use, we may disable flex filters */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
- const struct igc_nfc_rule *rule)
+ struct igc_nfc_rule *rule)
{
int err;
+ if (rule->flex) {
+ return igc_add_flex_filter(adapter, rule);
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
err = igc_add_etype_filter(adapter, rule->filter.etype,
rule->action);
@@ -2710,6 +3430,11 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
static void igc_disable_nfc_rule(struct igc_adapter *adapter,
const struct igc_nfc_rule *rule)
{
+ if (rule->flex) {
+ igc_del_flex_filter(adapter, rule->filter.flex_index);
+ return;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
igc_del_etype_filter(adapter, rule->filter.etype);
@@ -2906,6 +3631,8 @@ static void igc_configure(struct igc_adapter *adapter)
igc_get_hw_control(adapter);
igc_set_rx_mode(netdev);
+ igc_restore_vlan(adapter);
+
igc_setup_tctl(adapter);
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
@@ -2925,7 +3652,10 @@ static void igc_configure(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igc_ring *ring = adapter->rx_ring[i];
- igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ if (ring->xsk_pool)
+ igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+ else
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
}
@@ -3540,14 +4270,17 @@ static int igc_poll(struct napi_struct *napi, int budget)
struct igc_q_vector *q_vector = container_of(napi,
struct igc_q_vector,
napi);
+ struct igc_ring *rx_ring = q_vector->rx.ring;
bool clean_complete = true;
int work_done = 0;
if (q_vector->tx.ring)
clean_complete = igc_clean_tx_irq(q_vector, budget);
- if (q_vector->rx.ring) {
- int cleaned = igc_clean_rx_irq(q_vector, budget);
+ if (rx_ring) {
+ int cleaned = rx_ring->xsk_pool ?
+ igc_clean_rx_irq_zc(q_vector, budget) :
+ igc_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -4035,26 +4768,29 @@ void igc_down(struct igc_adapter *adapter)
igc_ptp_suspend(adapter);
- /* disable receives in the hardware */
- rctl = rd32(IGC_RCTL);
- wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
- /* flush and sleep below */
-
+ if (pci_device_is_present(adapter->pdev)) {
+ /* disable receives in the hardware */
+ rctl = rd32(IGC_RCTL);
+ wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+ /* flush and sleep below */
+ }
/* set trans_start so we don't get spurious watchdogs during reset */
netif_trans_update(netdev);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- /* disable transmits in the hardware */
- tctl = rd32(IGC_TCTL);
- tctl &= ~IGC_TCTL_EN;
- wr32(IGC_TCTL, tctl);
- /* flush both disables and wait for them to finish */
- wrfl();
- usleep_range(10000, 20000);
+ if (pci_device_is_present(adapter->pdev)) {
+ /* disable transmits in the hardware */
+ tctl = rd32(IGC_TCTL);
+ tctl &= ~IGC_TCTL_EN;
+ wr32(IGC_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ wrfl();
+ usleep_range(10000, 20000);
- igc_irq_disable(adapter);
+ igc_irq_disable(adapter);
+ }
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
@@ -4199,6 +4935,9 @@ static int igc_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features;
struct igc_adapter *adapter = netdev_priv(netdev);
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ igc_vlan_mode(netdev, features);
+
/* Add VLAN support */
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
@@ -4394,6 +5133,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
*/
static int igc_request_msix(struct igc_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
int i = 0, err = 0, vector = 0, free_vector = 0;
struct net_device *netdev = adapter->netdev;
@@ -4402,7 +5142,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -4481,20 +5227,12 @@ bool igc_has_link(struct igc_adapter *adapter)
* false until the igc_check_for_link establishes link
* for copper adapters ONLY
*/
- switch (hw->phy.media_type) {
- case igc_media_type_copper:
- if (!hw->mac.get_link_status)
- return true;
- hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- break;
- default:
- case igc_media_type_unknown:
- break;
- }
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
@@ -4582,7 +5320,9 @@ static void igc_watchdog_task(struct work_struct *work)
adapter->tx_timeout_factor = 14;
break;
case SPEED_100:
- /* maybe add some timeout factor ? */
+ case SPEED_1000:
+ case SPEED_2500:
+ adapter->tx_timeout_factor = 7;
break;
}
@@ -5009,7 +5749,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
bool enable)
{
struct igc_ring *ring;
- int i;
if (queue < 0 || queue >= adapter->num_tx_queues)
return -EINVAL;
@@ -5017,17 +5756,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
ring = adapter->tx_ring[queue];
ring->launchtime_enable = enable;
- if (adapter->base_time)
- return 0;
-
- adapter->cycle_time = NSEC_PER_SEC;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = adapter->tx_ring[i];
- ring->start_time = 0;
- ring->end_time = NSEC_PER_SEC;
- }
-
return 0;
}
@@ -5072,7 +5800,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
if (e->gate_mask & BIT(i))
queue_uses[i]++;
@@ -5100,16 +5828,31 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
+static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+{
+ int i;
+
+ adapter->base_time = 0;
+ adapter->cycle_time = NSEC_PER_SEC;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = 0;
+ ring->end_time = NSEC_PER_SEC;
+ }
+
+ return 0;
+}
+
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
u32 start_time = 0, end_time = 0;
size_t n;
- if (!qopt->enable) {
- adapter->base_time = 0;
- return 0;
- }
+ if (!qopt->enable)
+ return igc_tsn_clear_schedule(adapter);
if (adapter->base_time)
return -EALREADY;
@@ -5129,7 +5872,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
end_time += e->interval;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i)))
@@ -5161,6 +5904,74 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
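To make the Qbv flattening above concrete, here is a minimal standalone sketch (userspace C, all queue counts, gate masks and intervals invented) of how the gate list is turned into per-ring windows by igc_save_qbv_schedule(): every entry advances end_time by its interval, and every queue whose bit is set in that entry's gate_mask inherits the resulting window.

#include <stdio.h>
#include <stdint.h>

struct gate_entry { uint32_t gate_mask; uint32_t interval_ns; };

int main(void)
{
	/* Hypothetical 1 ms cycle: queue 0 open for 300 us, queues 1-2 for
	 * the remaining 700 us; queue 3 never appears in the gate list.
	 */
	const struct gate_entry entries[] = {
		{ .gate_mask = 0x1, .interval_ns = 300000 },
		{ .gate_mask = 0x6, .interval_ns = 700000 },
	};
	uint32_t start[4] = { 0 }, end[4] = { 0 };
	uint32_t start_time = 0, end_time = 0;
	unsigned int n, i;

	for (n = 0; n < 2; n++) {
		end_time += entries[n].interval_ns;

		for (i = 0; i < 4; i++) {
			if (!(entries[n].gate_mask & (1u << i)))
				continue;
			start[i] = start_time;
			end[i] = end_time;
		}
		start_time = end_time;
	}

	/* Prints queue 0 -> [0, 300000) and queues 1-2 -> [300000, 1000000);
	 * in the driver, an unlisted queue keeps the defaults set by
	 * igc_tsn_clear_schedule().
	 */
	for (i = 0; i < 4; i++)
		printf("queue %u: start %u end %u\n", i, start[i], end[i]);
	return 0;
}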
+static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
+ bool enable, int idleslope, int sendslope,
+ int hicredit, int locredit)
+{
+ bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
+ struct net_device *netdev = adapter->netdev;
+ struct igc_ring *ring;
+ int i;
+
+ /* The i225 has two sets of credit-based shaper logic, so CBS is
+ * supported only on the two highest priority queues.
+ */
+ if (queue < 0 || queue > 1)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+
+ for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
+ if (adapter->tx_ring[i])
+ cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
+
+ /* CBS should be enabled on the highest priority queue first in order
+ * for the CBS algorithm to operate as intended.
+ */
+ if (enable) {
+ if (queue == 1 && !cbs_status[0]) {
+ netdev_err(netdev,
+ "Enabling CBS on queue1 before queue0\n");
+ return -EINVAL;
+ }
+ } else {
+ if (queue == 0 && cbs_status[1]) {
+ netdev_err(netdev,
+ "Disabling CBS on queue0 before queue1\n");
+ return -EINVAL;
+ }
+ }
+
+ ring->cbs_enable = enable;
+ ring->idleslope = idleslope;
+ ring->sendslope = sendslope;
+ ring->hicredit = hicredit;
+ ring->locredit = locredit;
+
+ return 0;
+}
+
+static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
+ qopt->idleslope, qopt->sendslope,
+ qopt->hicredit, qopt->locredit);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -5173,6 +5984,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_ETF:
return igc_tsn_enable_launchtime(adapter, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return igc_tsn_enable_cbs(adapter, type_data);
+
default:
return -EOPNOTSUPP;
}
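For context, a CBS qdisc configured with "offload 1" reaches this handler through ndo_setup_tc as a struct tc_cbs_qopt_offload. The sketch below shows roughly what such a request looks like for hardware queue 0; the field names follow include/net/pkt_cls.h as I recall them, and every number is purely illustrative.

/* Illustrative request only, not part of the patch. */
struct tc_cbs_qopt_offload qopt = {
	.enable    = 1,
	.queue     = 0,        /* igc accepts only queues 0 and 1 */
	.idleslope = 20000,    /* kbps reserved for the SR class */
	.sendslope = -980000,  /* kbps; not programmable on i225 */
	.hicredit  = 30,       /* bytes */
	.locredit  = -1470,    /* bytes; not programmable on i225 */
};

/* The stack would then call igc_setup_tc(dev, TC_SETUP_QDISC_CBS, &qopt),
 * which stores the parameters on tx_ring[0] via igc_save_cbs_params() and
 * applies them through igc_tsn_offload_apply() (scheduling an adapter
 * reset when the interface is running).
 */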
@@ -5185,6 +5999,9 @@ static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
+ bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
@@ -5230,6 +6047,43 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
return num_frames - drops;
}
+static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+ struct igc_q_vector *q_vector)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 eics = 0;
+
+ eics |= q_vector->eims_value;
+ wr32(IGC_EICS, eics);
+}
+
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+
+ if (test_bit(__IGC_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igc_xdp_is_enabled(adapter))
+ return -ENXIO;
+
+ if (queue_id >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ ring = adapter->rx_ring[queue_id];
+
+ if (!ring->xsk_pool)
+ return -ENXIO;
+
+ q_vector = adapter->q_vector[queue_id];
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ igc_trigger_rxtxq_interrupt(adapter, q_vector);
+
+ return 0;
+}
+
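For orientation, a rough sketch of how this wakeup is reached on the zero-copy receive path; the steps outside this driver are described from memory of the generic AF_XDP core rather than quoted from it.

/* userspace poll()/sendto() on an AF_XDP socket bound to queue N
 *   -> AF_XDP core invokes dev->netdev_ops->ndo_xsk_wakeup(dev, N, flags)
 *   -> igc_xsk_wakeup() writes the queue's EIMS bit into IGC_EICS
 *   -> the software-triggered interrupt schedules NAPI on that q_vector
 *   -> igc_poll() sees ring->xsk_pool set and runs igc_clean_rx_irq_zc()
 */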
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -5241,10 +6095,11 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_do_ioctl = igc_ioctl,
+ .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
+ .ndo_xsk_wakeup = igc_xsk_wakeup,
};
/* PCIe configuration access */
@@ -5401,6 +6256,10 @@ static int igc_probe(struct pci_dev *pdev,
pci_enable_pcie_error_reporting(pdev);
+ err = pci_enable_ptm(pdev, NULL);
+ if (err < 0)
+ dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
+
pci_set_master(pdev);
err = -ENOMEM;
@@ -5484,11 +6343,15 @@ static int igc_probe(struct pci_dev *pdev,
/* copy netdev features into list of user selectable features */
netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -5550,6 +6413,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_ptp_init(adapter);
+ igc_tsn_clear_schedule(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -5594,6 +6459,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -5997,6 +6863,61 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw)
return adapter->netdev;
}
+static void igc_disable_rx_ring_hw(struct igc_ring *ring)
+{
+ struct igc_hw *hw = &ring->q_vector->adapter->hw;
+ u8 idx = ring->reg_idx;
+ u32 rxdctl;
+
+ rxdctl = rd32(IGC_RXDCTL(idx));
+ rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
+ rxdctl |= IGC_RXDCTL_SWFLUSH;
+ wr32(IGC_RXDCTL(idx), rxdctl);
+}
+
+void igc_disable_rx_ring(struct igc_ring *ring)
+{
+ igc_disable_rx_ring_hw(ring);
+ igc_clean_rx_ring(ring);
+}
+
+void igc_enable_rx_ring(struct igc_ring *ring)
+{
+ struct igc_adapter *adapter = ring->q_vector->adapter;
+
+ igc_configure_rx_ring(adapter, ring);
+
+ if (ring->xsk_pool)
+ igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+ else
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+}
+
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+ struct igc_hw *hw = &ring->q_vector->adapter->hw;
+ u8 idx = ring->reg_idx;
+ u32 txdctl;
+
+ txdctl = rd32(IGC_TXDCTL(idx));
+ txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+ txdctl |= IGC_TXDCTL_SWFLUSH;
+ wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+void igc_disable_tx_ring(struct igc_ring *ring)
+{
+ igc_disable_tx_ring_hw(ring);
+ igc_clean_tx_ring(ring);
+}
+
+void igc_enable_tx_ring(struct igc_ring *ring)
+{
+ struct igc_adapter *adapter = ring->q_vector->adapter;
+
+ igc_configure_tx_ring(adapter, ring);
+}
+
/**
* igc_init_module - Driver Registration Routine
*
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 83aeb5e7076f..5cad31c3c7b0 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
return ret_val;
}
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID)
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 69617d2c1be2..0f021909b430 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -9,6 +9,8 @@
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -16,6 +18,9 @@
#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
+#define IGC_PTM_STAT_SLEEP 2
+#define IGC_PTM_STAT_TIMEOUT 100
+
/* SYSTIM read access for I225 */
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
@@ -752,6 +757,147 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-EFAULT : 0;
}
+/* The two conditions below must be met for cross timestamping via
+ * PCIe PTM:
+ *
+ * 1. We have a way to convert the timestamps in the PTM messages
+ * to something related to the system clocks (right now, only
+ * X86 systems with support for the Always Running Timer allow that);
+ *
+ * 2. We have PTM enabled in the path from the device to the PCIe root port.
+ */
+static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
+{
+ return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+}
+
+static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
+{
+#if IS_ENABLED(CONFIG_X86_TSC)
+ return convert_art_ns_to_tsc(tstamp);
+#else
+ return (struct system_counterval_t) { };
+#endif
+}
+
+static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ switch (ptm_stat) {
+ case IGC_PTM_STAT_RET_ERR:
+ netdev_err(netdev, "PTM Error: Root port timeout\n");
+ break;
+ case IGC_PTM_STAT_BAD_PTM_RES:
+ netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n");
+ break;
+ case IGC_PTM_STAT_T4M1_OVFL:
+ netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n");
+ break;
+ case IGC_PTM_STAT_ADJUST_1ST:
+ netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n");
+ break;
+ case IGC_PTM_STAT_ADJUST_CYC:
+ netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n");
+ break;
+ default:
+ netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat);
+ break;
+ }
+}
+
+static int igc_phc_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ u32 stat, t2_curr_h, t2_curr_l, ctrl;
+ struct igc_adapter *adapter = ctx;
+ struct igc_hw *hw = &adapter->hw;
+ int err, count = 100;
+ ktime_t t1, t2_curr;
+
+ /* Get a snapshot of system clocks to use as historic value. */
+ ktime_get_snapshot(&adapter->snapshot);
+
+ do {
+ /* Doing this in a loop because in the event of a
+ * badly timed (ha!) system clock adjustment, we may
+ * get PTM errors from the PCI root, but these errors
+ * are transitory. Repeating the process returns valid
+ * data eventually.
+ */
+
+ /* To "manually" start the PTM cycle we need to clear and
+ * then set again the TRIG bit.
+ */
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ ctrl |= IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+
+ /* The cycle only starts "for real" when software notifies
+ * that it has read the registers, this is done by setting
+ * VALID bit.
+ */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+
+ err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
+ stat, IGC_PTM_STAT_SLEEP,
+ IGC_PTM_STAT_TIMEOUT);
+ if (err < 0) {
+ netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ return err;
+ }
+
+ if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
+ break;
+
+ if (stat & ~IGC_PTM_STAT_VALID) {
+ /* An error occurred, log it. */
+ igc_ptm_log_error(adapter, stat);
+ /* The STAT register is write-1-to-clear (W1C),
+ * so write the previous error status to clear it.
+ */
+ wr32(IGC_PTM_STAT, stat);
+ continue;
+ }
+ } while (--count);
+
+ if (!count) {
+ netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n");
+ return -ETIMEDOUT;
+ }
+
+ t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L));
+
+ t2_curr_l = rd32(IGC_PTM_CURR_T2_L);
+ t2_curr_h = rd32(IGC_PTM_CURR_T2_H);
+
+ /* FIXME: When the register that tells the endianness of the
+ * PTM registers is implemented, check it here and add the
+ * appropriate conversion.
+ */
+ t2_curr_h = swab32(t2_curr_h);
+
+ t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l);
+
+ *device = t1;
+ *system = igc_device_tstamp_to_system(t2_curr);
+
+ return 0;
+}
+
+static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+
+ return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+ adapter, &adapter->snapshot, cts);
+}
+
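Once getcrosststamp is registered, userspace can pull a PTM-based cross timestamp from the PHC character device with the PTP_SYS_OFFSET_PRECISE ioctl (the same interface phc2sys uses). A minimal sketch, assuming the i225 clock is exposed as /dev/ptp0; when PTM is not available the ioctl simply fails.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise cross = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);	/* device path is illustrative */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &cross) < 0) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	/* device/sys_realtime are filled via get_device_system_crosststamp() */
	printf("PHC %lld.%09u <-> CLOCK_REALTIME %lld.%09u\n",
	       (long long)cross.device.sec, cross.device.nsec,
	       (long long)cross.sys_realtime.sec, cross.sys_realtime.nsec);
	return 0;
}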
/**
* igc_ptp_init - Initialize PTP functionality
* @adapter: Board private structure
@@ -788,6 +934,11 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
+
+ if (!igc_is_crosststamp_supported(adapter))
+ break;
+
+ adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp;
break;
default:
adapter->ptp_clock = NULL;
@@ -849,7 +1000,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- igc_ptp_time_save(adapter);
+ if (pci_device_is_present(adapter->pdev))
+ igc_ptp_time_save(adapter);
}
/**
@@ -878,7 +1030,9 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ u32 cycle_ctrl, ctrl;
unsigned long flags;
+ u32 timadj;
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
@@ -887,12 +1041,38 @@ void igc_ptp_reset(struct igc_adapter *adapter)
switch (adapter->hw.mac.type) {
case igc_i225:
+ timadj = rd32(IGC_TIMADJ);
+ timadj |= IGC_TIMADJ_ADJUST_METH;
+ wr32(IGC_TIMADJ, timadj);
+
wr32(IGC_TSAUXC, 0x0);
wr32(IGC_TSSDP, 0x0);
wr32(IGC_TSIM,
IGC_TSICR_INTERRUPTS |
(adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0));
wr32(IGC_IMS, IGC_IMS_TS);
+
+ if (!igc_is_crosststamp_supported(adapter))
+ break;
+
+ wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
+ wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
+
+ cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT);
+
+ wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl);
+
+ ctrl = IGC_PTM_CTRL_EN |
+ IGC_PTM_CTRL_START_NOW |
+ IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
+ IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
+ IGC_PTM_CTRL_TRIG;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+
+ /* Force the first cycle to run. */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+
break;
default:
/* No work to do. */
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index cc174853554b..e197a33d93a0 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -10,8 +10,8 @@
#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define IGC_MDIC 0x00020 /* MDI Control - RW */
-#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
@@ -67,6 +67,9 @@
/* Filtering Registers */
#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */
+#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */
+#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */
/* ETQF register bit definitions */
#define IGC_ETQF_FILTER_ENABLE BIT(26)
@@ -75,6 +78,19 @@
#define IGC_ETQF_QUEUE_MASK 0x00070000
#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+/* FHFT register bit definitions */
+#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0)
+#define IGC_FHFT_QUEUE_SHIFT 8
+#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8)
+#define IGC_FHFT_PRIO_SHIFT 16
+#define IGC_FHFT_PRIO_MASK GENMASK(18, 16)
+#define IGC_FHFT_IMM_INT BIT(24)
+#define IGC_FHFT_DROP BIT(25)
+
+/* FHFTSL register bit definitions */
+#define IGC_FHFTSL_FTSL_SHIFT 0
+#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0)
+
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
/* RSS Random Key - RW Array */
@@ -220,6 +236,9 @@
#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n))
#define IGC_DTXMXPKTSZ 0x355C
+#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
+#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
@@ -229,6 +248,29 @@
#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */
+
+/* PCIe Registers */
+#define IGC_PTM_CTRL 0x12540 /* PTM Control */
+#define IGC_PTM_STAT 0x12544 /* PTM Status */
+#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */
+
+/* PTM Time registers */
+#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */
+#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */
+
+#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */
+#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */
+#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */
+#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */
+#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */
+#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */
+#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */
+#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */
+
+#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */
+#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */
+
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
@@ -240,6 +282,7 @@
#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */
/* Wake Up packet memory */
#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 174103c4bea6..0fce22de2ab8 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -18,8 +18,38 @@ static bool is_any_launchtime(struct igc_adapter *adapter)
return false;
}
+static bool is_cbs_enabled(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (ring->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+{
+ unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
+
+ if (adapter->base_time)
+ new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ if (is_any_launchtime(adapter))
+ new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ if (is_cbs_enabled(adapter))
+ new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
+
+ return new_flags;
+}
+
/* Returns the TSN specific registers to their default values after
- * TSN offloading is disabled.
+ * the adapter is reset.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
@@ -27,11 +57,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
u32 tqavctrl;
int i;
- if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
- return 0;
-
- adapter->cycle_time = 0;
-
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
@@ -41,18 +66,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igc_ring *ring = adapter->tx_ring[i];
-
- ring->start_time = 0;
- ring->end_time = 0;
- ring->launchtime_enable = false;
-
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
}
- wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
@@ -68,9 +87,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
ktime_t base_time, systim;
int i;
- if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
- return 0;
-
cycle = adapter->cycle_time;
base_time = adapter->base_time;
@@ -88,6 +104,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
+ u16 cbs_value;
+ u32 tqavcc;
wr32(IGC_STQT(i), ring->start_time);
wr32(IGC_ENDQT(i), ring->end_time);
@@ -105,6 +123,90 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ /* Skip configuring CBS for Q2 and Q3 */
+ if (i > 1)
+ goto skip_cbs;
+
+ if (ring->cbs_enable) {
+ if (i == 0)
+ txqctl |= IGC_TXQCTL_QAV_SEL_CBS0;
+ else
+ txqctl |= IGC_TXQCTL_QAV_SEL_CBS1;
+
+ /* According to i225 datasheet section 7.5.2.7, we
+ * should set the 'idleSlope' field from TQAVCC
+ * register following the equation:
+ *
+ * value = link-speed   0x7736 * BW * 0.2
+ *         ---------- * -----------------  (E1)
+ *          100Mbps            2.5
+ *
+ * Note that 'link-speed' is in Mbps.
+ *
+ * 'BW' is the percentage bandwidth out of full
+ * link speed which can be found with the
+ * following equation. Note that idleSlope here
+ * is the parameter from this function
+ * which is in kbps.
+ *
+ * BW =     idleSlope
+ *      -----------------  (E2)
+ *      link-speed * 1000
+ *
+ * That said, we can come up with a generic
+ * equation to calculate the value we should set
+ * in the TQAVCC register by replacing 'BW' in E1 with E2.
+ * The resulting equation is:
+ *
+ * value = link-speed * 0x7736 * idleSlope * 0.2
+ *         -------------------------------------  (E3)
+ *             100 * 2.5 * link-speed * 1000
+ *
+ * 'link-speed' is present on both sides of the
+ * fraction so it is canceled out. The final
+ * equation is the following:
+ *
+ * value = idleSlope * 61036
+ *         -----------------  (E4)
+ *              2500000
+ *
+ * NOTE: For i225, given the above, we can see
+ * that idleslope is represented in
+ * 40.959433 kbps units by the value at
+ * the TQAVCC register (2.5Gbps / 61036),
+ * which reduces the granularity for
+ * idleslope increments.
+ *
+ * In the i225 controller, the sendSlope and loCredit
+ * parameters from CBS are not configurable
+ * by software, so we don't do any
+ * 'controller configuration' with respect to
+ * these parameters.
+ */
+ cbs_value = DIV_ROUND_UP_ULL(ring->idleslope * 61036ULL, 2500000);
+
+ tqavcc = rd32(IGC_TQAVCC(i));
+ tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK;
+ tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS;
+ wr32(IGC_TQAVCC(i), tqavcc);
+
+ wr32(IGC_TQAVHC(i),
+ 0x80000000 + ring->hicredit * 0x7735);
+ } else {
+ /* Disable any CBS for the queue */
+ txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
+
+ /* Set idleSlope to zero. */
+ tqavcc = rd32(IGC_TQAVCC(i));
+ tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK |
+ IGC_TQAVCC_KEEP_CREDITS);
+ wr32(IGC_TQAVCC(i), tqavcc);
+
+ /* Set hiCredit to zero. */
+ wr32(IGC_TQAVHC(i), 0);
+ }
+skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
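A quick numeric check of (E4), using an invented 20 Mbps (20000 kbps) idleslope:

/* Example values only:
 *
 *   cbs_value = DIV_ROUND_UP_ULL(20000 * 61036ULL, 2500000)
 *             = DIV_ROUND_UP_ULL(1220720000, 2500000)
 *             = 489
 *
 * One TQAVCC unit is 2.5 Gbps / 61036 ~= 40.96 kbps, so 489 units
 * reserve ~20.03 Mbps: the requested idleslope is rounded up to the
 * next representable step before being written to the register.
 */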
@@ -125,33 +227,41 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_BASET_H, baset_h);
wr32(IGC_BASET_L, baset_l);
- adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
-
return 0;
}
-int igc_tsn_offload_apply(struct igc_adapter *adapter)
+int igc_tsn_reset(struct igc_adapter *adapter)
{
- bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
+ unsigned int new_flags;
+ int err = 0;
- if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
- return 0;
+ new_flags = igc_tsn_new_flags(adapter);
- if (!is_any_enabled) {
- int err = igc_tsn_disable_offload(adapter);
+ if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
+ return igc_tsn_disable_offload(adapter);
- if (err < 0)
- return err;
+ err = igc_tsn_enable_offload(adapter);
+ if (err < 0)
+ return err;
- /* The BASET registers aren't cleared when writing
- * into them, force a reset if the interface is
- * running.
- */
- if (netif_running(adapter->netdev))
- schedule_work(&adapter->reset_task);
+ adapter->flags = new_flags;
+ return err;
+}
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ int err;
+
+ if (netif_running(adapter->netdev)) {
+ schedule_work(&adapter->reset_task);
return 0;
}
- return igc_tsn_enable_offload(adapter);
+ err = igc_tsn_enable_offload(adapter);
+ if (err < 0)
+ return err;
+
+ adapter->flags = igc_tsn_new_flags(adapter);
+ return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index f76bc86ddccd..1512307f5a52 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -5,5 +5,6 @@
#define _IGC_TSN_H_
int igc_tsn_offload_apply(struct igc_adapter *adapter);
+int igc_tsn_reset(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 11133c4619bb..a8cf5374be47 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <net/xdp_sock_drv.h>
+
#include "igc.h"
#include "igc_xdp.h"
@@ -32,29 +34,112 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return 0;
}
-int igc_xdp_register_rxq_info(struct igc_ring *ring)
+static int igc_xdp_enable_pool(struct igc_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
{
- struct net_device *dev = ring->netdev;
+ struct net_device *ndev = adapter->netdev;
+ struct device *dev = &adapter->pdev->dev;
+ struct igc_ring *rx_ring, *tx_ring;
+ struct napi_struct *napi;
+ bool needs_reset;
+ u32 frame_size;
int err;
- err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
- if (err) {
- netdev_err(dev, "Failed to register xdp rxq info\n");
- return err;
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
+ /* When XDP is enabled, the driver doesn't support frames that
+ * span over multiple buffers. To avoid that, we check if xsk
+ * frame size is big enough to fit the max ethernet frame size
+ * + vlan double tagging.
+ */
+ return -EOPNOTSUPP;
}
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
+ err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
if (err) {
- netdev_err(dev, "Failed to register xdp rxq mem model\n");
- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ netdev_err(ndev, "Failed to map xsk pool\n");
return err;
}
+ needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+ rx_ring = adapter->rx_ring[queue_id];
+ tx_ring = adapter->tx_ring[queue_id];
+ /* Rx and Tx rings share the same napi context. */
+ napi = &rx_ring->q_vector->napi;
+
+ if (needs_reset) {
+ igc_disable_rx_ring(rx_ring);
+ igc_disable_tx_ring(tx_ring);
+ napi_disable(napi);
+ }
+
+ set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+ set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+ if (needs_reset) {
+ napi_enable(napi);
+ igc_enable_rx_ring(rx_ring);
+ igc_enable_tx_ring(tx_ring);
+
+ err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
+{
+ struct igc_ring *rx_ring, *tx_ring;
+ struct xsk_buff_pool *pool;
+ struct napi_struct *napi;
+ bool needs_reset;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+ if (!pool)
+ return -EINVAL;
+
+ needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+ rx_ring = adapter->rx_ring[queue_id];
+ tx_ring = adapter->tx_ring[queue_id];
+ /* Rx and Tx rings share the same napi context. */
+ napi = &rx_ring->q_vector->napi;
+
+ if (needs_reset) {
+ igc_disable_rx_ring(rx_ring);
+ igc_disable_tx_ring(tx_ring);
+ napi_disable(napi);
+ }
+
+ xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+ clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+ clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+ if (needs_reset) {
+ napi_enable(napi);
+ igc_enable_rx_ring(rx_ring);
+ igc_enable_tx_ring(tx_ring);
+ }
+
return 0;
}
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+ u16 queue_id)
{
- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
+ igc_xdp_disable_pool(adapter, queue_id);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h
index cfecb515b718..a74e5487d199 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.h
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.h
@@ -6,8 +6,12 @@
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack);
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+ u16 queue_id);
-int igc_xdp_register_rxq_info(struct igc_ring *ring);
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
+static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+ return !!adapter->xdp_prog;
+}
#endif /* _IGC_XDP_H_ */