author    Linus Torvalds <torvalds@linux-foundation.org>  2024-05-14 19:42:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-05-14 19:42:24 -0700
commit    1b294a1f35616977caddaddf3e9d28e576a1adbc (patch)
tree      723a406740083006b8f8724b5c5e532d4efa431d /drivers/net/ethernet/ti/am65-cpsw-nuss.c
parent    b850dc206a57ae272c639e31ac202ec0c2f46960 (diff)
parent    654de42f3fc6edc29d743c1dbcd1424f7793f63d (diff)
Merge tag 'net-next-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

"Core & protocols:

 - Complete rework of garbage collection of AF_UNIX sockets. AF_UNIX is prone to forming reference count cycles due to its fd passing functionality. The new method, based on Tarjan's Strongly Connected Components algorithm, should both be faster and remove a lot of workarounds we accumulated over the years (a userspace sketch of such a cycle follows the shortlog below).
 - Add TCP fraglist GRO support, allowing chaining multiple TCP packets and forwarding them together. Useful for small switches / routers which lack basic checksum offload in some scenarios (e.g. PPPoE).
 - Support using SMP threads for handling packet backlog, i.e. packet processing from software interfaces and old drivers which don't use NAPI. This helps move the processing out of the softirq jumble.
 - Continue work of converting from rtnl lock to RCU protection. Don't require rtnl lock when reading: IPv6 routing FIB, IPv6 address labels, netdev threaded NAPI sysfs files, bonding driver's sysfs files, MPLS devconf, IPv4 FIB rules, netns IDs, tcp metrics, TC Qdiscs, neighbor entries, ARP entries via ioctl(SIOCGARP), a lot of the link information available via rtnetlink.
 - Small optimizations from Eric to UDP wake up handling, memory accounting, RPS/RFS implementation, TCP packet sizing etc.
 - Allow direct page recycling in the bulk API used by XDP, for +2% PPS.
 - Support peek with an offset on TCP sockets.
 - Add MPTCP APIs for querying the last time packets were received/sent/acked and whether MPTCP "upgrade" succeeded on a TCP socket.
 - Add intra-node communication shortcut to improve SMC performance.
 - Add IPv6 (and IPv{4,6}-over-IPv{4,6}) support to the GTP protocol driver.
 - Add HSR-SAN (RedBOX) mode of operation to the HSR protocol driver.
 - Add reset reasons for tracing what caused a TCP reset to be sent.
 - Introduce direction attribute for xfrm (IPsec) states. A state can be used either for input or output packet processing.

Things we sprinkled into general kernel code:

 - Add bitmap_{read,write}(), bitmap_size(), expose BYTES_TO_BITS(). This required touch-ups and renaming of a few existing users.
 - Add Endian-dependent __counted_by_{le,be} annotations.
 - Make building selftests "quieter" by printing summaries like "CC object.o" rather than full commands with all the arguments.

Netfilter:

 - Use GFP_KERNEL to clone elements, to deal better with OOM situations and avoid failures in the .commit step.

BPF:

 - Add eBPF JIT for ARCv2 CPUs.
 - Support attaching kprobe BPF programs through kprobe_multi link in a session mode, meaning a BPF program is attached to both function entry and return; the entry program can decide if the return program gets executed, and the entry program can share a u64 cookie value with the return program. "Session mode" is a common use case for tetragon and bpftrace.
 - Add the ability to specify and retrieve a BPF cookie for raw tracepoint programs in order to ease migration from classic to raw tracepoints.
 - Add an internal-only BPF per-CPU instruction for resolving per-CPU memory addresses and implement support in x86, ARM64 and RISC-V JITs. This allows inlining functions which need to access per-CPU state.
 - Optimize x86 BPF JIT's emit_mov_imm64, and add support for various atomics in bpf_arena which can be JITed as a single x86 instruction. Support BPF arena on ARM64.
 - Add a new bpf_wq API for deferring events and refactor process-context bpf_timer code to keep common code where possible.
 - Harden the BPF verifier's and/or/xor value tracking.
 - Introduce crypto kfuncs to let BPF programs call kernel crypto APIs.
 - Support bpf_tail_call_static() helper for BPF programs with GCC 13.
 - Add bpf_preempt_{disable,enable}() kfuncs in order to allow a BPF program to have code sections where preemption is disabled.

Driver API:

 - Skip software TC processing completely if all installed rules are marked as HW-only, instead of checking the HW-only flag rule by rule.
 - Add support for configuring PoE (Power over Ethernet), similar to the already existing support for PoDL (Power over Data Line) config.
 - Initial bits of a queue control API, for now allowing a single queue to be reset without disturbing packet flow to other queues.
 - Common (ethtool) statistics for hardware timestamping.

Tests and tooling:

 - Remove the need to create a config file to run the net forwarding tests so that a naive "make run_tests" can exercise them.
 - Define a method of writing tests which require an external endpoint to communicate with (to send/receive data towards the test machine). Add a few such tests.
 - Create a shared code library for writing Python tests. Expose the YAML Netlink library from tools/ to the tests for easy Netlink access.
 - Move netfilter tests under net/, extend them, separate performance tests from correctness tests, and iron out issues found by running them "on every commit".
 - Refactor BPF selftests to use common network helpers.
 - Further work filling in YAML definitions of Netlink messages for: nftables, team driver, bonding interfaces, vlan interfaces, VF info, TC u32 mark, TC police action.
 - Teach Python YAML Netlink to decode attribute policies.
 - Extend the definition of the "indexed array" construct in the specs to cover arrays of scalars rather than just nests.
 - Add hyperlinks between definitions in generated Netlink docs.

Drivers:

 - Make sure unsupported flower control flags are rejected by drivers, and make more drivers report errors directly to the application rather than dmesg (large number of driver changes from Asbjørn Sloth Tønnesen).
 - Ethernet high-speed NICs:
    - Broadcom (bnxt):
       - support multiple RSS contexts and steering traffic to them
       - support XDP metadata
       - make page pool allocations more NUMA aware
    - Intel (100G, ice, idpf):
       - extract datapath code common among Intel drivers into a library
       - use fewer resources in switchdev by sharing queues with the PF
       - add PFCP filter support
       - add Ethernet filter support
       - use a spinlock instead of HW lock in PTP clock ops
       - support 5 layer Tx scheduler topology
    - nVidia/Mellanox:
       - 800G link modes and 100G SerDes speeds
       - per-queue IRQ coalescing configuration
    - Marvell Octeon:
       - support offloading TC packet mark action
 - Ethernet NICs consumer, embedded and virtual:
    - stop lying about skb->truesize in USB Ethernet drivers; it messes up TCP memory calculations
    - Google cloud vNIC:
       - support changing ring size via ethtool
       - support ring reset using the queue control API
    - VirtIO net:
       - expose flow hash from RSS to XDP
       - per-queue statistics
       - add selftests
    - Synopsys (stmmac):
       - support controllers which require an RX clock signal from the MII bus to perform their hardware initialization
    - TI:
       - icssg_prueth: support ICSSG-based Ethernet on AM65x SR1.0 devices
       - icssg_prueth: add SW TX / RX Coalescing based on hrtimers
       - cpsw: minimal XDP support
    - Renesas (ravb):
       - support describing the MDIO bus
    - Realtek (r8169):
       - add support for RTL8168M
    - Microchip Sparx5:
       - matchall and flower actions mirred and redirect
 - Ethernet switches:
    - nVidia/Mellanox:
       - improve events processing performance
    - Marvell:
       - add support for MV88E6250 family internal PHYs
    - Microchip:
       - add DCB and DSCP mapping support for KSZ switches
       - vsc73xx: convert to PHYLINK
    - Realtek:
       - rtl8226b/rtl8221b: add C45 instances and SerDes switching
    - Many driver changes related to PHYLIB and PHYLINK deprecated API cleanup
 - Ethernet PHYs:
    - Add a new driver for Airoha EN8811H 2.5 Gigabit PHY.
    - micrel: lan8814: add support for PPS out and external timestamp trigger
 - WiFi:
    - Disable Wireless Extensions (WEXT) in all Wi-Fi 7 device drivers. Modern devices can only be configured using nl80211.
    - mac80211/cfg80211:
       - handle color change per link for WiFi 7 Multi-Link Operation
    - Intel (iwlwifi):
       - don't support puncturing in 5 GHz
       - support monitor mode on passive channels
       - BZ-W device support
       - P2P with HE/EHT support
       - re-add support for firmware API 90
       - provide channel survey information for Automatic Channel Selection
    - MediaTek (mt76):
       - mt7921 LED control
       - mt7925 EHT radiotap support
       - mt7920e PCI support
    - Qualcomm (ath11k):
       - P2P support for QCA6390, WCN6855 and QCA2066
       - support hibernation
       - ieee80211-freq-limit Device Tree property support
    - Qualcomm (ath12k):
       - refactoring in preparation of multi-link support
       - suspend and hibernation support
       - ACPI support
       - debugfs support, including dfs_simulate_radar support
    - RealTek:
       - rtw88: RTL8723CS SDIO device support
       - rtw89: RTL8922AE Wi-Fi 7 PCI device support
       - rtw89: complete features of new WiFi 7 chip 8922AE including BT-coexistence and Wake-on-WLAN
       - rtw89: use BIOS ACPI settings to set TX power and channels
       - rtl8xxxu: enable Management Frame Protection (MFP) support
 - Bluetooth:
    - support for Intel BlazarI and Filmore Peak2 (BE201)
    - support for MediaTek MT7921S SDIO
    - initial support for Intel PCIe BT driver
    - remove HCI_AMP support"

* tag 'net-next-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1827 commits)
  selftests: netfilter: fix packetdrill conntrack testcase
  net: gro: fix napi_gro_cb zeroed alignment
  Bluetooth: btintel_pcie: Refactor and code cleanup
  Bluetooth: btintel_pcie: Fix warning reported by sparse
  Bluetooth: hci_core: Fix not handling hdev->le_num_of_adv_sets=1
  Bluetooth: btintel: Fix compiler warning for multi_v7_defconfig config
  Bluetooth: btintel_pcie: Fix compiler warnings
  Bluetooth: btintel_pcie: Add *setup* function to download firmware
  Bluetooth: btintel_pcie: Add support for PCIe transport
  Bluetooth: btintel: Export few static functions
  Bluetooth: HCI: Remove HCI_AMP support
  Bluetooth: L2CAP: Fix div-by-zero in l2cap_le_flowctl_init()
  Bluetooth: qca: Fix error code in qca_read_fw_build_info()
  Bluetooth: hci_conn: Use __counted_by() and avoid -Wfamnae warning
  Bluetooth: btintel: Add support for Filmore Peak2 (BE201)
  Bluetooth: btintel: Add support for BlazarI
  LE Create Connection command timeout increased to 20 secs
  dt-bindings: net: bluetooth: Add MediaTek MT7921S SDIO Bluetooth
  Bluetooth: compute LE flow credits based on recvbuf space
  Bluetooth: hci_sync: Use cmd->num_cis instead of magic number
  ...
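As context for the AF_UNIX garbage-collection item above: the cycles it targets are created entirely from userspace via SCM_RIGHTS. The following is a minimal, hypothetical userspace sketch (not part of this pull request or the diff below; the function name is illustrative) of a socket whose own file descriptor ends up in its own receive queue, so that closing both fds leaves only the GC able to reclaim it:

```c
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Hypothetical illustration: park one end of an AF_UNIX socketpair inside
 * its own receive queue via SCM_RIGHTS, then close both userspace fds.
 * The in-flight fd keeps a reference to the socket, forming a cycle. */
int make_unix_cycle(void)
{
	int sk[2];
	char cbuf[CMSG_SPACE(sizeof(int))];
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sk))
		return -1;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &sk[0], sizeof(int));

	/* The message lands in sk[0]'s receive queue and carries a
	 * reference to sk[0] itself. */
	if (sendmsg(sk[1], &msg, 0) < 0)
		return -1;

	close(sk[0]);
	close(sk[1]);	/* both fds closed, yet the cycle keeps refs alive */
	return 0;
}
```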
Diffstat (limited to 'drivers/net/ethernet/ti/am65-cpsw-nuss.c')
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c  704
1 file changed, 587 insertions, 117 deletions
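The bulk of the diff below switches the RX path to page_pool-backed buffers and adds minimal XDP support. As a condensed reading aid only (not the literal patch, and it compiles only in kernel context), this sketch shows the verdict handling that the new am65_cpsw_run_xdp() implements; the AM65_CPSW_XDP_* return codes and helper names come from the diff itself, while error and recycle paths are trimmed:

```c
static int am65_cpsw_run_xdp_sketch(struct am65_cpsw_port *port,
				    struct xdp_buff *xdp, int *len)
{
	struct bpf_prog *prog = READ_ONCE(port->xdp_prog);
	u32 act;

	if (!prog)
		return AM65_CPSW_XDP_PASS;	/* no program attached: normal skb path */

	act = bpf_prog_run_xdp(prog, xdp);
	*len = xdp->data_end - xdp->data;	/* the program may have resized the frame */

	switch (act) {
	case XDP_PASS:
		return AM65_CPSW_XDP_PASS;	/* hand the (adjusted) buffer to the stack */
	case XDP_TX:
		/* convert to an xdp_frame and push it on a TX channel
		 * (see am65_cpsw_xdp_tx_frame() in the diff) */
		return AM65_CPSW_XDP_CONSUMED;
	case XDP_REDIRECT:
		if (xdp_do_redirect(port->ndev, xdp, prog))
			break;			/* redirect failed: drop below */
		return AM65_CPSW_XDP_REDIRECT;	/* xdp_do_flush() runs once per NAPI poll */
	default:
		bpf_warn_invalid_xdp_action(port->ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		break;
	}

	/* dropped: the RX page goes straight back to the page_pool */
	return AM65_CPSW_XDP_CONSUMED;
}
```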
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1d00e21808c1..4e50b3792888 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
@@ -30,6 +31,7 @@
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <net/page_pool/helpers.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -101,6 +103,12 @@
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9)
+
/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
@@ -124,6 +132,11 @@
AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+#define AM65_CPSW_TS_RX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
+
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC 500
@@ -138,6 +151,18 @@
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+/* CPPI streaming packet interface */
+#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
+#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
+
+/* XDP */
+#define AM65_CPSW_XDP_CONSUMED 2
+#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_PASS 0
+
+/* Include headroom compatible with both skb and xdpf */
+#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
@@ -305,12 +330,11 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct sk_buff *skb)
+ struct page *page)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- u32 pkt_len = skb_tailroom(skb);
dma_addr_t desc_dma;
dma_addr_t buf_dma;
void *swdata;
@@ -322,20 +346,22 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
- DMA_FROM_DEVICE);
+ buf_dma = dma_map_single(rx_chn->dma_dev,
+ page_address(page) + AM65_CPSW_HEADROOM,
+ AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_err(dev, "Failed to map rx skb buffer\n");
+ dev_err(dev, "Failed to map rx buffer\n");
return -EINVAL;
}
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
+ buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = skb;
+ *((void **)swdata) = page_address(page);
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}
@@ -369,25 +395,137 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct xdp_rxq_info *rxq;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
+
+ if (rx_chn->page_pool) {
+ page_pool_destroy(rx_chn->page_pool);
+ rx_chn->page_pool = NULL;
+ }
+}
+
+static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP,
+ .order = 0,
+ .pool_size = AM65_CPSW_MAX_RX_DESC,
+ .nid = dev_to_node(common->dev),
+ .dev = common->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .napi = &common->napi_rx,
+ };
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int i, ret;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rx_chn->page_pool = pool;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ am65_cpsw_destroy_xdp_rxqs(common);
+ return ret;
+}
+
+static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
+ void *desc,
+ unsigned char dsize_log2)
+{
+ void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
+
+ return (desc - pool_addr) >> dsize_log2;
+}
+
+static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc,
+ enum am65_cpsw_tx_buf_type buf_type)
+{
+ int desc_idx;
+
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
+ tx_chn->dsize_log2);
+ k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
+ (void *)buf_type);
+}
+
+static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ int desc_idx;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
+ tx_chn->dsize_log2);
+
+ return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
+ desc_idx);
+}
+
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+ struct page *page,
+ bool allow_direct,
+ int desc_idx)
+{
+ page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
+ rx_chn->pages[desc_idx] = NULL;
+}
+
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
dma_addr_t buf_dma;
u32 buf_dma_len;
+ void *page_addr;
void **swdata;
+ int desc_idx;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_kfree_skb_any(skb);
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+ am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -440,12 +578,32 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ struct net_device *ndev,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+
+ len += AM65_CPSW_HEADROOM;
+
+ skb = build_skb(page_addr, len);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb->dev = ndev;
+
+ return skb;
+}
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int port_idx, i, ret, tx;
- struct sk_buff *skb;
u32 val, port_mask;
+ struct page *page;
if (common->usage_count)
return 0;
@@ -505,25 +663,29 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- for (i = 0; i < common->rx_chns.descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL,
- AM65_CPSW_MAX_PACKET_SIZE,
- GFP_KERNEL);
- if (!skb) {
+ ret = am65_cpsw_create_xdp_rxqs(common);
+ if (ret) {
+ dev_err(common->dev, "Failed to create XDP rx queues\n");
+ return ret;
+ }
+
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (!page) {
ret = -ENOMEM;
- dev_err(common->dev, "cannot allocate skb\n");
if (i)
goto fail_rx;
return ret;
}
+ rx_chn->pages[i] = page;
- ret = am65_cpsw_nuss_rx_push(common, skb);
+ ret = am65_cpsw_nuss_rx_push(common, page);
if (ret < 0) {
dev_err(common->dev,
- "cannot submit skb to channel rx, error %d\n",
+ "cannot submit page to channel rx: %d\n",
ret);
- kfree_skb(skb);
+ am65_cpsw_put_page(rx_chn, page, false, i);
if (i)
goto fail_rx;
@@ -531,27 +693,27 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
}
}
- ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
goto fail_rx;
}
for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+ ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
tx, ret);
tx--;
goto fail_tx;
}
- napi_enable(&common->tx_chns[tx].napi_tx);
+ napi_enable(&tx_chn[tx].napi_tx);
}
napi_enable(&common->napi_rx);
if (common->rx_irq_disabled) {
common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
+ enable_irq(rx_chn->irq);
}
dev_dbg(common->dev, "cpsw_nuss started\n");
@@ -559,22 +721,23 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
fail_tx:
while (tx >= 0) {
- napi_disable(&common->tx_chns[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+ napi_disable(&tx_chn[tx].napi_tx);
+ k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
tx--;
}
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
am65_cpsw_nuss_rx_cleanup, 0);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int i;
if (common->usage_count != 1)
@@ -590,26 +753,25 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
reinit_completion(&common->tdown_complete);
for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+ k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
i = wait_for_completion_timeout(&common->tdown_complete,
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&common->tx_chns[i].napi_tx);
- hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ napi_disable(&tx_chn[i].napi_tx);
+ hrtimer_cancel(&tx_chn[i].tx_hrtimer);
}
for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
- &common->tx_chns[i],
+ k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
}
reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
@@ -621,17 +783,22 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
am65_cpsw_nuss_rx_cleanup, !!i);
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ if (rx_chn->pages[i])
+ am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
+ }
+ am65_cpsw_destroy_xdp_rxqs(common);
+
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -749,16 +916,149 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
+ struct am65_cpsw_tx_chn *tx_chn,
+ struct xdp_frame *xdpf,
+ enum am65_cpsw_tx_buf_type buf_type)
{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct cppi5_host_desc_t *host_desc;
+ struct netdev_queue *netif_txq;
+ dma_addr_t dma_desc, dma_buf;
+ u32 pkt_len = xdpf->len;
+ void **swdata;
+ int ret;
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc)) {
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+
+ am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
- ns = ((u64)psdata[1] << 32) | psdata[0];
+ dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
+ pkt_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
+ ndev->stats.tx_dropped++;
+ ret = -ENOMEM;
+ goto pool_free;
+ }
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ *(swdata) = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ spin_unlock_bh(&tx_chn->lock);
+ }
+ if (ret) {
+ /* Inform BQL */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto dma_unmap;
+ }
+
+ return 0;
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
+dma_unmap:
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
+ dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
+pool_free:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ return ret;
+}
+
+static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_port *port,
+ struct xdp_buff *xdp,
+ int desc_idx, int cpu, int *len)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct net_device *ndev = port->ndev;
+ int ret = AM65_CPSW_XDP_CONSUMED;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ struct page *page;
+ u32 act;
+
+ prog = READ_ONCE(port->xdp_prog);
+ if (!prog)
+ return AM65_CPSW_XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
+ switch (act) {
+ case XDP_PASS:
+ ret = AM65_CPSW_XDP_PASS;
+ goto out;
+ case XDP_TX:
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ break;
+
+ __netif_tx_lock(netif_txq, cpu);
+ ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (ret)
+ break;
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_CONSUMED;
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ break;
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_REDIRECT;
+ goto out;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ }
+
+ page = virt_to_head_page(xdp->data);
+ am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+
+out:
+ return ret;
}
/* RX psdata[2] word format - checksum information */
@@ -795,7 +1095,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx)
+ u32 flow_idx, int cpu)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -803,13 +1103,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
+ int headroom, desc_idx, ret;
struct net_device *ndev;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ void *page_addr;
void **swdata;
u32 *psdata;
- int ret = 0;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
if (ret) {
@@ -830,7 +1133,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
+ page = virt_to_page(page_addr);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -838,12 +1142,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
port = am65_common_get_port(common, port_id);
ndev = port->ndev;
- skb->dev = ndev;
-
psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* add RX timestamp */
- if (port->rx_ts_enabled)
- am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -851,36 +1150,64 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
- if (new_skb) {
- ndev_priv = netdev_priv(ndev);
- am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
-
- stats = this_cpu_ptr(ndev_priv->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- kmemleak_not_leak(new_skb);
- } else {
- ndev->stats.rx_dropped++;
- new_skb = skb;
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
+ if (port->xdp_prog) {
+ xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
+
+ xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+ pkt_len, false);
+
+ ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+ cpu, &pkt_len);
+ if (ret != AM65_CPSW_XDP_PASS)
+ return ret;
+
+ /* Compute additional headroom to be reserved */
+ headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
+ skb_reserve(skb, headroom);
+ }
+
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
+ skb_put(skb, pkt_len);
+ if (port->rx_ts_enabled)
+ am65_cpts_rx_timestamp(common->cpts, skb);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+
+ new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rx_chn->pages[desc_idx] = new_page;
+
if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
- ret = am65_cpsw_nuss_rx_push(common, new_skb);
+requeue:
+ ret = am65_cpsw_nuss_rx_push(common, new_page);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -901,6 +1228,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cpu = smp_processor_id();
+ bool xdp_redirect = false;
int cur_budget, ret;
int num_rx = 0;
@@ -909,9 +1238,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow);
- if (ret)
+ ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
+ if (ret) {
+ if (ret == AM65_CPSW_XDP_REDIRECT)
+ xdp_redirect = true;
break;
+ }
num_rx++;
}
@@ -919,6 +1251,9 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
+ if (xdp_redirect)
+ xdp_do_flush();
+
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
@@ -938,8 +1273,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
+am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
@@ -968,6 +1303,39 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
return skb;
}
+static struct xdp_frame *
+am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma,
+ struct net_device **ndev)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct am65_cpsw_port *port;
+ struct xdp_frame *xdpf;
+ u32 port_id = 0;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ xdpf = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ port = am65_common_get_port(common, port_id);
+ *ndev = port->ndev;
+
+ ndev_priv = netdev_priv(*ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += xdpf->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return xdpf;
+}
+
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -988,11 +1356,13 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1013,10 +1383,21 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
- total_bytes = skb->len;
- ndev = skb->dev;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes = skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes = xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
netif_txq = netdev_get_tx_queue(ndev, chn);
@@ -1034,11 +1415,13 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1057,11 +1440,21 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
-
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes += xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
}
@@ -1183,10 +1576,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_stop_q;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
- cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
@@ -1225,6 +1621,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
@@ -1334,7 +1733,6 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
struct ifreq *ifr)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
struct hwtstamp_config cfg;
@@ -1358,11 +1756,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1372,10 +1765,13 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -EOPNOTSUPP;
default:
return -ERANGE;
}
@@ -1405,6 +1801,10 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+ if (port->rx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
+
writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
writel(ts_vlan_ltype, port->port_base +
AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
@@ -1412,9 +1812,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
- /* en/dis RX timestamp */
- am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
-
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1431,7 +1828,7 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
cfg.tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1488,6 +1885,59 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
+ struct bpf_prog *prog)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ bool running = netif_running(ndev);
+ struct bpf_prog *old_prog;
+
+ if (running)
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+
+ old_prog = xchg(&port->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running)
+ return am65_cpsw_nuss_ndo_slave_open(ndev);
+
+ return 0;
+}
+
+static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ int i, nxmit = 0;
+
+ tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
+ break;
+ nxmit++;
+ }
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
+}
+
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1502,6 +1952,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
+ .ndo_bpf = am65_cpsw_ndo_bpf,
+ .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
};
static void am65_cpsw_disable_phy(struct phy *phy)
@@ -1772,7 +2224,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
.mode = K3_RINGACC_RING_MODE_RING,
.flags = 0
};
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
int i, ret = 0;
hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
@@ -1816,6 +2268,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
+ tx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
+
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
@@ -1862,8 +2318,8 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
static void am65_cpsw_nuss_remove_rx_chns(void *data)
{
struct am65_cpsw_common *common = data;
- struct am65_cpsw_rx_chn *rx_chn;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_chn *rx_chn;
rx_chn = &common->rx_chns;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
@@ -1873,11 +2329,7 @@ static void am65_cpsw_nuss_remove_rx_chns(void *data)
netif_napi_del(&common->napi_rx);
- if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+ am65_cpsw_nuss_free_rx_chns(common);
common->rx_flow_id_base = -1;
}
@@ -1888,7 +2340,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -1920,6 +2372,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
+ rx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
+
+ rx_chn->page_pool = NULL;
+
+ rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
+ sizeof(*rx_chn->pages), GFP_KERNEL);
+ if (!rx_chn->pages)
+ return -ENOMEM;
+
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2252,6 +2715,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
port->ndev->vlan_features |= NETIF_F_SG;
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
@@ -2315,6 +2781,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
if (ret)
dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+ port->xdp_prog = NULL;
+
if (!common->dma_ndev)
common->dma_ndev = port->ndev;
@@ -2588,7 +3056,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
}
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;
@@ -2922,7 +3391,8 @@ static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2958,9 +3428,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ int ale_entries;
u64 id_temp;
int ret, i;
- int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
@@ -3172,10 +3642,10 @@ static int am65_cpsw_nuss_suspend(struct device *dev)
static int am65_cpsw_nuss_resume(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
- struct am65_cpsw_host *host_p = am65_common_get_host(common);
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)