-rw-r--r--  Documentation/devicetree/bindings/net/renesas,etheravb.yaml      |   1
-rw-r--r--  Documentation/networking/device_drivers/ethernet/amazon/ena.rst  |   1
-rw-r--r--  drivers/net/ethernet/amazon/ena/Makefile                         |   2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c                    |  18
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c                     | 689
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h                     |  99
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c                        | 468
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.h                        | 151
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c                  |   2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c              |  13
-rw-r--r--  drivers/net/mdio/mdio-mux-bcm-iproc.c                            |   6
-rw-r--r--  drivers/net/mdio/of_mdio.c                                       |  12
-rw-r--r--  drivers/net/phy/mdio_bus.c                                       |   3
-rw-r--r--  drivers/net/ppp/ppp_async.c                                      |   2
-rw-r--r--  include/linux/phy.h                                              |   3
-rw-r--r--  include/net/fib_rules.h                                          |   3
-rw-r--r--  include/net/xdp_sock_drv.h                                       |   4
-rw-r--r--  include/uapi/linux/ethtool.h                                     |   1
-rw-r--r--  net/core/fib_rules.c                                             |   3
-rw-r--r--  net/ethtool/ioctl.c                                              |  44
-rw-r--r--  net/ipv4/fib_rules.c                                             |   6
-rw-r--r--  net/ipv4/ipmr.c                                                  |   2
-rw-r--r--  net/ipv6/fib6_rules.c                                            |   4
-rw-r--r--  net/ipv6/ip6mr.c                                                 |   2
-rw-r--r--  net/sched/cls_api.c                                              |   1
-rwxr-xr-x  tools/testing/selftests/net/pmtu.sh                              |   4
-rwxr-xr-x  tools/testing/selftests/net/unicast_extensions.sh                |   4
27 files changed, 794 insertions(+), 754 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index d3306b186000..890f7858d0dc 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -58,6 +58,7 @@ properties:
- renesas,r9a07g043-gbeth # RZ/G2UL and RZ/Five
- renesas,r9a07g044-gbeth # RZ/G2{L,LC}
- renesas,r9a07g054-gbeth # RZ/V2L
+ - renesas,r9a08g045-gbeth # RZ/G3S
- const: renesas,rzg2l-gbeth # RZ/{G2L,G2UL,V2L} family
reg: true
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index 5eaa3ab6c73e..b842bcb14255 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -54,6 +54,7 @@ ena_common_defs.h Common definitions for ena_com layer.
ena_regs_defs.h Definition of ENA PCI memory-mapped (MMIO) registers.
ena_netdev.[ch] Main Linux kernel driver.
ena_ethtool.c ethtool callbacks.
+ena_xdp.[ch] XDP implementation.
ena_pci_id_tbl.h Supported device IDs.
================= ======================================================
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index f1f752a8f7bb..6ab615365172 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 2d8c5a2841b8..0cb6cc1cef56 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include "ena_netdev.h"
+#include "ena_xdp.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -262,17 +263,14 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
ena_stats->name);
}
- if (!is_xdp) {
- /* RX stats, in XDP there isn't a RX queue
- * counterpart
- */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ /* In XDP there isn't an RX queue counterpart */
+ if (is_xdp)
+ continue;
- ethtool_sprintf(data,
- "queue_%u_rx_%s", i,
- ena_stats->name);
- }
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name);
}
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index afd1b7ce0013..1c0a7828d397 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,8 +19,8 @@
#include <net/ip.h>
#include "ena_netdev.h"
-#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
+#include "ena_xdp.h"
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
@@ -45,53 +45,6 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count);
-static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
-static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
-static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
-static void ena_napi_disable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_napi_enable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static int ena_up(struct ena_adapter *adapter);
-static void ena_down(struct ena_adapter *adapter);
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info);
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-
-/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
-static void ena_increase_stat(u64 *statp, u64 cnt,
- struct u64_stats_sync *syncp)
-{
- u64_stats_update_begin(syncp);
- (*statp) += cnt;
- u64_stats_update_end(syncp);
-}
-
-static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
-{
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
-}
-
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -135,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static int ena_xmit_common(struct net_device *dev,
- struct ena_ring *ring,
- struct ena_tx_buffer *tx_info,
- struct ena_com_tx_ctx *ena_tx_ctx,
- u16 next_to_use,
- u32 bytes)
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
{
- struct ena_adapter *adapter = netdev_priv(dev);
int rc, nb_hw_desc;
if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
ena_ring_tx_doorbell(ring);
@@ -162,7 +114,7 @@ static int ena_xmit_common(struct net_device *dev,
* ena_com_prepare_tx() are fatal and therefore require a device reset.
*/
if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
+ netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
@@ -178,6 +130,7 @@ static int ena_xmit_common(struct net_device *dev,
u64_stats_update_end(&ring->syncp);
tx_info->tx_descs = nb_hw_desc;
+ tx_info->total_tx_size = bytes;
tx_info->last_jiffies = jiffies;
tx_info->print_once = 0;
@@ -186,467 +139,6 @@ static int ena_xmit_common(struct net_device *dev,
return 0;
}
-/* This is the XDP napi callback. XDP queues use a separate napi callback
- * than Rx/Tx queues.
- */
-static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
-{
- struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
- u32 xdp_work_done, xdp_budget;
- struct ena_ring *xdp_ring;
- int napi_comp_call = 0;
- int ret;
-
- xdp_ring = ena_napi->xdp_ring;
-
- xdp_budget = budget;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
- napi_complete_done(napi, 0);
- return 0;
- }
-
- xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
-
- /* If the device is about to reset or down, avoid unmask
- * the interrupt and return 0 so NAPI won't reschedule
- */
- if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
- napi_complete_done(napi, 0);
- ret = 0;
- } else if (xdp_budget > xdp_work_done) {
- napi_comp_call = 1;
- if (napi_complete_done(napi, xdp_work_done))
- ena_unmask_interrupt(xdp_ring, NULL);
- ena_update_ring_numa_node(xdp_ring, NULL);
- ret = xdp_work_done;
- } else {
- ret = xdp_budget;
- }
-
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.napi_comp += napi_comp_call;
- xdp_ring->tx_stats.tx_poll++;
- u64_stats_update_end(&xdp_ring->syncp);
- xdp_ring->tx_stats.last_napi_jiffies = jiffies;
-
- return ret;
-}
-
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_frame *xdpf,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- struct ena_adapter *adapter = xdp_ring->adapter;
- struct ena_com_buf *ena_buf;
- int push_len = 0;
- dma_addr_t dma;
- void *data;
- u32 size;
-
- tx_info->xdpf = xdpf;
- data = tx_info->xdpf->data;
- size = tx_info->xdpf->len;
-
- if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* Designate part of the packet for LLQ */
- push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-
- ena_tx_ctx->push_header = data;
-
- size -= push_len;
- data += push_len;
- }
-
- ena_tx_ctx->header_len = push_len;
-
- if (size > 0) {
- dma = dma_map_single(xdp_ring->dev,
- data,
- size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
- goto error_report_dma_error;
-
- tx_info->map_linear_data = 0;
-
- ena_buf = tx_info->bufs;
- ena_buf->paddr = dma;
- ena_buf->len = size;
-
- ena_tx_ctx->ena_bufs = ena_buf;
- ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
- }
-
- return 0;
-
-error_report_dma_error:
- ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
- &xdp_ring->syncp);
- netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
-
- return -EINVAL;
-}
-
-static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- int flags)
-{
- struct ena_com_tx_ctx ena_tx_ctx = {};
- struct ena_tx_buffer *tx_info;
- u16 next_to_use, req_id;
- int rc;
-
- next_to_use = xdp_ring->next_to_use;
- req_id = xdp_ring->free_ids[next_to_use];
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
- if (unlikely(rc))
- return rc;
-
- ena_tx_ctx.req_id = req_id;
-
- rc = ena_xmit_common(dev,
- xdp_ring,
- tx_info,
- &ena_tx_ctx,
- next_to_use,
- xdpf->len);
- if (rc)
- goto error_unmap_dma;
-
- /* trigger the dma engine. ena_ring_tx_doorbell()
- * calls a memory barrier inside it.
- */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- return rc;
-
-error_unmap_dma:
- ena_unmap_tx_buff(xdp_ring, tx_info);
- tx_info->xdpf = NULL;
- return rc;
-}
-
-static int ena_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
-{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_ring *xdp_ring;
- int qid, i, nxmit = 0;
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
- return -ENETDOWN;
-
- /* We assume that all rings have the same XDP program */
- if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
- return -ENXIO;
-
- qid = smp_processor_id() % adapter->xdp_num_queues;
- qid += adapter->xdp_first_ring;
- xdp_ring = &adapter->tx_ring[qid];
-
- /* Other CPU ids might try to send thorugh this queue */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- for (i = 0; i < n; i++) {
- if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
- break;
- nxmit++;
- }
-
- /* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
-
- /* Return number of packets sent */
- return nxmit;
-}
-
-static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
-{
- u32 verdict = ENA_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ena_ring *xdp_ring;
- struct xdp_frame *xdpf;
- u64 *xdp_stat;
-
- xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
-
- if (!xdp_prog)
- goto out;
-
- verdict = bpf_prog_run_xdp(xdp_prog, xdp);
-
- switch (verdict) {
- case XDP_TX:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- }
-
- /* Find xmit queue */
- xdp_ring = rx_ring->xdp_ring;
-
- /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
- XDP_XMIT_FLUSH))
- xdp_return_frame(xdpf);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
- xdp_stat = &rx_ring->rx_stats.xdp_tx;
- verdict = ENA_XDP_TX;
- break;
- case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
- xdp_stat = &rx_ring->rx_stats.xdp_redirect;
- verdict = ENA_XDP_REDIRECT;
- break;
- }
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_ABORTED:
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_DROP:
- xdp_stat = &rx_ring->rx_stats.xdp_drop;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_PASS:
- xdp_stat = &rx_ring->rx_stats.xdp_pass;
- verdict = ENA_XDP_PASS;
- break;
- default:
- bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_invalid;
- verdict = ENA_XDP_DROP;
- }
-
- ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
-out:
- return verdict;
-}
-
-static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
-{
- adapter->xdp_first_ring = adapter->num_io_queues;
- adapter->xdp_num_queues = adapter->num_io_queues;
-
- ena_init_io_rings(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
-}
-
-static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
-{
- u32 xdp_first_ring = adapter->xdp_first_ring;
- u32 xdp_num_queues = adapter->xdp_num_queues;
- int rc = 0;
-
- rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto setup_err;
-
- rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto create_err;
-
- return 0;
-
-create_err:
- ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
-setup_err:
- return rc;
-}
-
-/* Provides a way for both kernel and bpf-prog to know
- * more about the RX-queue a given XDP frame arrived on.
- */
-static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
-{
- int rc;
-
- rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- goto err;
- }
-
- rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- }
-
-err:
- return rc;
-}
-
-static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
-{
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-}
-
-static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first, int count)
-{
- struct bpf_prog *old_bpf_prog;
- struct ena_ring *rx_ring;
- int i = 0;
-
- for (i = first; i < count; i++) {
- rx_ring = &adapter->rx_ring[i];
- old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
-
- if (!old_bpf_prog && prog) {
- ena_xdp_register_rxq_info(rx_ring);
- rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else if (old_bpf_prog && !prog) {
- ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = NET_SKB_PAD;
- }
- }
-}
-
-static void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
-{
- struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
-
- ena_xdp_exchange_program_rx_in_range(adapter,
- prog,
- 0,
- adapter->num_io_queues);
-
- if (old_bpf_prog)
- bpf_prog_put(old_bpf_prog);
-}
-
-static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
-{
- bool was_up;
- int rc;
-
- was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
-
- if (was_up)
- ena_down(adapter);
-
- adapter->xdp_first_ring = 0;
- adapter->xdp_num_queues = 0;
- ena_xdp_exchange_program(adapter, NULL);
- if (was_up) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- return 0;
-}
-
-static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- struct bpf_prog *prog = bpf->prog;
- struct bpf_prog *old_bpf_prog;
- int rc, prev_mtu;
- bool is_up;
-
- is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- rc = ena_xdp_allowed(adapter);
- if (rc == ENA_XDP_ALLOWED) {
- old_bpf_prog = adapter->xdp_bpf_prog;
- if (prog) {
- if (!is_up) {
- ena_init_all_xdp_queues(adapter);
- } else if (!old_bpf_prog) {
- ena_down(adapter);
- ena_init_all_xdp_queues(adapter);
- }
- ena_xdp_exchange_program(adapter, prog);
-
- if (is_up && !old_bpf_prog) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- xdp_features_set_redirect_target(netdev, false);
- } else if (old_bpf_prog) {
- xdp_features_clear_redirect_target(netdev);
- rc = ena_destroy_and_free_all_xdp_queues(adapter);
- if (rc)
- return rc;
- }
-
- prev_mtu = netdev->max_mtu;
- netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
-
- if (!old_bpf_prog)
- netif_info(adapter, drv, adapter->netdev,
- "XDP program is set, changing the max_mtu from %d to %d",
- prev_mtu, netdev->max_mtu);
-
- } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
- netdev->mtu, ENA_XDP_MAX_MTU);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
- return -EINVAL;
- } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
- adapter->num_io_queues, adapter->max_num_io_queues);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
- * program as well as to query the current xdp program id.
- */
-static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return ena_xdp_set(netdev, bpf);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +180,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -820,9 +312,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count)
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i, rc = 0;
@@ -845,8 +336,8 @@ err_setup_tx:
return rc;
}
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i;
@@ -859,7 +350,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
*
* Free all transmit software resources
*/
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
ena_free_all_io_tx_resources_in_range(adapter,
0,
@@ -1169,8 +660,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -1262,6 +753,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
}
@@ -1272,8 +764,8 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
-static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
- struct ena_tx_buffer *tx_info, bool is_xdp)
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
{
if (tx_info)
netif_err(ring->adapter,
@@ -1305,17 +797,6 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
-{
- struct ena_tx_buffer *tx_info;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
-
- return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
-}
-
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
struct netdev_queue *txq;
@@ -1363,7 +844,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
skb);
- tx_bytes += skb->len;
+ tx_bytes += tx_info->total_tx_size;
dev_kfree_skb(skb);
tx_pkts++;
total_done += tx_info->tx_descs;
@@ -1688,6 +1169,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
return ret;
}
+
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1880,8 +1362,8 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->per_napi_packets = 0;
}
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
@@ -1913,8 +1395,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
int cpu = get_cpu();
int numa_node;
@@ -1949,67 +1431,6 @@ out:
put_cpu();
}
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
-{
- u32 total_done = 0;
- u16 next_to_clean;
- int tx_pkts = 0;
- u16 req_id;
- int rc;
-
- if (unlikely(!xdp_ring))
- return 0;
- next_to_clean = xdp_ring->next_to_clean;
-
- while (tx_pkts < budget) {
- struct ena_tx_buffer *tx_info;
- struct xdp_frame *xdpf;
-
- rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
- &req_id);
- if (rc) {
- if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(xdp_ring, req_id, NULL,
- true);
- break;
- }
-
- /* validate that the request id points to a valid xdp_frame */
- rc = validate_xdp_req_id(xdp_ring, req_id);
- if (rc)
- break;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- xdpf = tx_info->xdpf;
-
- tx_info->xdpf = NULL;
- tx_info->last_jiffies = 0;
- ena_unmap_tx_buff(xdp_ring, tx_info);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
- xdpf);
-
- tx_pkts++;
- total_done += tx_info->tx_descs;
-
- xdp_return_frame(xdpf);
- xdp_ring->free_ids[next_to_clean] = req_id;
- next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
- xdp_ring->ring_size);
- }
-
- xdp_ring->next_to_clean = next_to_clean;
- ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d done. total pkts: %d\n",
- xdp_ring->qid, tx_pkts);
-
- return tx_pkts;
-}
-
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
@@ -2326,28 +1747,36 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
netif_napi_del(&adapter->ena_napi[i].napi);
- WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].rx_ring);
}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
int first_index, int count)
{
+ int (*napi_handler)(struct napi_struct *napi, int budget);
int i;
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
+ struct ena_ring *rx_ring, *tx_ring;
- netif_napi_add(adapter->netdev, &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
+ memset(napi, 0, sizeof(*napi));
- if (!ENA_IS_XDP_INDEX(adapter, i)) {
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
- } else {
- napi->xdp_ring = &adapter->tx_ring[i];
- }
+ rx_ring = &adapter->rx_ring[i];
+ tx_ring = &adapter->tx_ring[i];
+
+ napi_handler = ena_io_poll;
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ napi_handler = ena_xdp_io_poll;
+
+ netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+
+ if (!ENA_IS_XDP_INDEX(adapter, i))
+ napi->rx_ring = rx_ring;
+
+ napi->tx_ring = tx_ring;
napi->qid = i;
}
}
@@ -2475,8 +1904,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
@@ -2556,12 +1985,15 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
if (rc)
goto create_err;
INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
+
+ ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
}
return 0;
create_err:
while (i--) {
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
}
@@ -2686,7 +2118,7 @@ err_setup_tx:
}
}
-static int ena_up(struct ena_adapter *adapter)
+int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
@@ -2748,7 +2180,7 @@ err_req_irq:
return rc;
}
-static void ena_down(struct ena_adapter *adapter)
+void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
@@ -3179,7 +2611,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
- rc = ena_xmit_common(dev,
+ rc = ena_xmit_common(adapter,
tx_ring,
tx_info,
&ena_tx_ctx,
@@ -3363,6 +2795,7 @@ static void ena_get_stats64(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_ring *rx_ring, *tx_ring;
+ u64 total_xdp_rx_drops = 0;
unsigned int start;
u64 rx_drops;
u64 tx_drops;
@@ -3371,8 +2804,8 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_io_queues; i++) {
- u64 bytes, packets;
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ u64 bytes, packets, xdp_rx_drops;
tx_ring = &adapter->tx_ring[i];
@@ -3385,16 +2818,22 @@ static void ena_get_stats64(struct net_device *netdev,
stats->tx_packets += packets;
stats->tx_bytes += bytes;
+ /* In XDP there isn't an RX queue counterpart */
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ continue;
+
rx_ring = &adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
+ xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+ total_xdp_rx_drops += xdp_rx_drops;
}
do {
@@ -3403,7 +2842,7 @@ static void ena_get_stats64(struct net_device *netdev,
tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry(&adapter->syncp, start));
- stats->rx_dropped = rx_drops;
+ stats->rx_dropped = rx_drops + total_xdp_rx_drops;
stats->tx_dropped = tx_drops;
stats->multicast = 0;
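
The ena_get_stats64() hunk above changes the aggregation loop: TX stats are now summed over both the regular IO rings and the XDP rings, RX stats are skipped for XDP indices, and the per-ring xdp_drop counters are folded into rx_dropped. A minimal userspace sketch of that loop shape, with invented ring counts and stat values; only the loop structure mirrors the driver:

#include <stdio.h>

struct ring_stats { unsigned long packets, bytes, xdp_drop; };

#define NUM_IO_QUEUES  4
#define XDP_NUM_QUEUES 4
#define IS_XDP_INDEX(i) ((i) >= NUM_IO_QUEUES && \
                         (i) < NUM_IO_QUEUES + XDP_NUM_QUEUES)

int main(void)
{
        struct ring_stats tx[NUM_IO_QUEUES + XDP_NUM_QUEUES] = {
                [0] = { 100, 6400, 0 },   /* regular TX ring */
                [4] = {  50, 3200, 0 },   /* XDP TX ring */
        };
        struct ring_stats rx[NUM_IO_QUEUES] = {
                [0] = { 200, 12800, 7 },  /* 7 packets dropped by XDP */
        };
        unsigned long tx_packets = 0, rx_packets = 0, rx_dropped = 0;
        int i;

        for (i = 0; i < NUM_IO_QUEUES + XDP_NUM_QUEUES; i++) {
                tx_packets += tx[i].packets;

                /* In XDP there isn't an RX queue counterpart */
                if (IS_XDP_INDEX(i))
                        continue;

                rx_packets += rx[i].packets;
                rx_dropped += rx[i].xdp_drop;  /* folded into rx_dropped */
        }

        printf("tx=%lu rx=%lu xdp drops in rx_dropped=%lu\n",
               tx_packets, rx_packets, rx_dropped);
        return 0;
}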
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 33c923e1261a..6d2cc20210cc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -110,19 +110,6 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
-/* The max MTU size is configured to be the ethernet frame size without
- * the overhead of the ethernet header, which can have a VLAN header, and
- * a frame check sequence (FCS).
- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
- */
-
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-
-#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
- ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
-
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -138,13 +125,18 @@ struct ena_napi {
struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- struct ena_ring *xdp_ring;
u32 qid;
struct dim dim;
};
struct ena_tx_buffer {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ };
/* num of ena desc for this specific skb
* (includes data desc and metadata desc)
*/
@@ -152,16 +144,14 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
- /* XDP buffer structure which is used for sending packets in
- * the xdp queues
- */
- struct xdp_frame *xdpf;
+ /* Total size of all buffers in bytes */
+ u32 total_tx_size;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
/* Used for detect missing tx packets to limit the number of prints */
- u32 print_once;
+ u8 print_once;
/* Save the last jiffies to detect missing tx packets
*
* sets to non zero value on ena_start_xmit and set to zero on
@@ -421,47 +411,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
-enum ena_xdp_errors_t {
- ENA_XDP_ALLOWED = 0,
- ENA_XDP_CURRENT_MTU_TOO_LARGE,
- ENA_XDP_NO_ENOUGH_QUEUES,
-};
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp);
-enum ENA_XDP_ACTIONS {
- ENA_XDP_PASS = 0,
- ENA_XDP_TX = BIT(0),
- ENA_XDP_REDIRECT = BIT(1),
- ENA_XDP_DROP = BIT(2)
-};
-
-#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
-
-static inline bool ena_xdp_present(struct ena_adapter *adapter)
-{
- return !!adapter->xdp_bpf_prog;
-}
-
-static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static inline void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
{
- return !!ring->xdp_bpf_prog;
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
}
-static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
- return 2 * queues <= adapter->max_num_io_queues;
-}
-
-static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
-{
- enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
-
- if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
- rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
- else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
- rc = ENA_XDP_NO_ENOUGH_QUEUES;
-
- return rc;
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes);
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+void ena_down(struct ena_adapter *adapter);
+int ena_up(struct ena_adapter *adapter);
+void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
#endif /* !(ENA_H) */
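
The ena_tx_buffer change above merges the skb and xdpf pointers into an anonymous union: a TX slot carries either a regular skb or an XDP frame, never both, so the two pointers can share storage. A small sketch with stand-in types (not the kernel definitions) showing the per-slot space saving:

#include <stdio.h>

struct sk_buff;     /* opaque stand-ins for the kernel types */
struct xdp_frame;

/* before: two pointers, only one ever valid for a given slot */
struct tx_buffer_old {
        struct sk_buff *skb;
        struct xdp_frame *xdpf;
        unsigned int num_of_bufs;
};

/* after: the pointers share storage via an anonymous union */
struct tx_buffer_new {
        union {
                struct sk_buff *skb;
                struct xdp_frame *xdpf;
        };
        unsigned int num_of_bufs;
};

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes per TX slot\n",
               sizeof(struct tx_buffer_old), sizeof(struct tx_buffer_new));
        return 0;
}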
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
new file mode 100644
index 000000000000..fc1c4ef73ba3
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "ena_xdp.h"
+
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
+}
+
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_adapter *adapter = tx_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
+ u32 size;
+
+ tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
+ size = tx_info->xdpf->len;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, tx_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
+
+ if (size > 0) {
+ dma = dma_map_single(tx_ring->dev,
+ data,
+ size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 0;
+
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
+
+ return 0;
+
+error_report_dma_error:
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+
+ return -EINVAL;
+}
+
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags)
+{
+ struct ena_com_tx_ctx ena_tx_ctx = {};
+ struct ena_tx_buffer *tx_info;
+ u16 next_to_use, req_id;
+ int rc;
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+
+ ena_tx_ctx.req_id = req_id;
+
+ rc = ena_xmit_common(adapter,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdpf->len);
+ if (rc)
+ goto error_unmap_dma;
+
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * calls a memory barrier inside it.
+ */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ return rc;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ tx_info->xdpf = NULL;
+ return rc;
+}
+
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_ring *tx_ring;
+ int qid, i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ tx_ring = &adapter->tx_ring[qid];
+
+	/* Other CPU ids might try to send through this queue */
+ spin_lock(&tx_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
+ break;
+ nxmit++;
+ }
+
+ /* Ring doorbell to make device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ spin_unlock(&tx_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return nxmit;
+}
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both kernel and bpf-prog to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
+
+ netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d",
+ rx_ring->qid);
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev,
+ "Unregistering RX info for queue %d",
+ rx_ring->qid);
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
+{
+ struct bpf_prog *old_bpf_prog;
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+ if (!old_bpf_prog && prog) {
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else if (old_bpf_prog && !prog) {
+ rx_ring->rx_headroom = NET_SKB_PAD;
+ }
+ }
+}
+
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n");
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ xdp_features_set_redirect_target(netdev, false);
+ } else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
+ netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n");
+
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "XDP program is set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!tx_ring))
+ return 0;
+ next_to_clean = tx_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL, true);
+ break;
+ }
+
+ /* validate that the request id points to a valid xdp_frame */
+ rc = validate_xdp_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ tx_info->last_jiffies = 0;
+
+ xdpf = tx_info->xdpf;
+ tx_info->xdpf = NULL;
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ xdp_return_frame(xdpf);
+
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+ tx_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
+/* This is the XDP napi callback. XDP queues use a separate napi callback
+ * than Rx/Tx queues.
+ */
+int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring;
+ u32 work_done;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ work_done = ena_clean_xdp_irq(tx_ring, budget);
+
+	/* If the device is about to reset or down, avoid unmasking
+	 * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (budget > work_done) {
+ ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
+ &tx_ring->syncp);
+ if (napi_complete_done(napi, work_done))
+ ena_unmask_interrupt(tx_ring, NULL);
+
+ ena_update_ring_numa_node(tx_ring, NULL);
+ ret = work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
+ return ret;
+}
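
ena_xdp_xmit() above picks an XDP TX ring by hashing the sending CPU onto the XDP ring range, which sits after the regular IO rings in tx_ring[]; because several CPUs can map to the same ring, the ring's xdp_tx_lock serializes them. A sketch of that mapping with illustrative queue counts:

#include <stdio.h>

#define NUM_IO_QUEUES  4                 /* regular rings: indices 0..3 */
#define XDP_NUM_QUEUES 4                 /* XDP rings: indices 4..7 */
#define XDP_FIRST_RING NUM_IO_QUEUES

/* mirrors: qid = smp_processor_id() % xdp_num_queues + xdp_first_ring */
static int xdp_xmit_qid(int cpu)
{
        return cpu % XDP_NUM_QUEUES + XDP_FIRST_RING;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 10; cpu++)
                printf("cpu %d -> tx_ring[%d]\n", cpu, xdp_xmit_qid(cpu));

        /* cpu 0 and cpu 4 both land on ring 4: this is why the ring's
         * xdp_tx_lock is taken around the xmit loop.
         */
        return 0;
}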
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
new file mode 100644
index 000000000000..cfd82728486a
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_XDP_H
+#define ENA_XDP_H
+
+#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
+
+/* The max MTU size is configured to be the ethernet frame size without
+ * the overhead of the ethernet header, which can have a VLAN header, and
+ * a frame check sequence (FCS).
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
+ */
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count);
+int ena_xdp_io_poll(struct napi_struct *napi, int budget);
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags);
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
+
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
+static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ u32 verdict = ENA_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
+ struct xdp_frame *xdpf;
+ u64 *xdp_stat;
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ }
+
+ /* Find xmit queue */
+ xdp_ring = rx_ring->xdp_ring;
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
+ XDP_XMIT_FLUSH))
+ xdp_return_frame(xdpf);
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
+ break;
+ }
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_DROP:
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_PASS:
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
+ }
+
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+
+ return verdict;
+}
+#endif /* ENA_XDP_H */
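
The ENA_XDP_MAX_MTU macro above subtracts all per-buffer overheads from the page shared with the device. A worked example with typical x86-64 values; the page size and the aligned skb_shared_info size (SHINFO_SIZE) are assumptions for illustration, the real numbers come from the kernel headers at build time:

#include <stdio.h>

#define ENA_PAGE_SIZE       4096  /* assumed 4 KiB page */
#define ETH_HLEN            14
#define ETH_FCS_LEN         4
#define VLAN_HLEN           4
#define XDP_PACKET_HEADROOM 256
#define SHINFO_SIZE         320   /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

int main(void)
{
        int max_mtu = ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN -
                      VLAN_HLEN - XDP_PACKET_HEADROOM - SHINFO_SIZE;

        printf("ENA_XDP_MAX_MTU = %d\n", max_mtu);  /* 3498 with these inputs */
        return 0;
}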
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 1ca273f17d29..820b1fabe297 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6877,7 +6877,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
- dev->dev.of_node = port_node;
+ device_set_node(&dev->dev, port_fwnode);
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
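
The mvpp2 change above replaces a direct dev.of_node assignment with device_set_node(), which updates the device's fwnode and of_node pointers together. A userspace mimic with simplified stand-in types (the real helper lives in the driver core, and a real fwnode_handle is not just an of_node wrapper):

#include <stdio.h>
#include <stddef.h>

struct device_node { const char *name; };

struct fwnode_handle { struct device_node *of_node; };  /* simplified */

struct device {
        struct fwnode_handle *fwnode;
        struct device_node *of_node;
};

static struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
        return fwnode ? fwnode->of_node : NULL;
}

/* both pointers are updated together, so they can't go out of sync */
static void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
        dev->fwnode = fwnode;
        dev->of_node = to_of_node(fwnode);
}

int main(void)
{
        struct device_node port_node = { .name = "eth-port" };
        struct fwnode_handle port_fwnode = { .of_node = &port_node };
        struct device dev = { 0 };

        device_set_node(&dev, &port_fwnode);
        printf("of_node: %s\n", dev.of_node->name);
        return 0;
}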
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 7f30e08b580f..167145bdcb75 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2715,18 +2715,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->entry = NPC_MCAM_ENTRY_INVALID;
rsp->free_count = 0;
- /* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries) {
- dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
- __func__, req->ref_entry);
- return NPC_MCAM_INVALID_REQ;
- }
+	/* If ref_entry is greater than the range,
+	 * clamp it to the max value.
+	 */
+ if (req->ref_entry > mcam->bmap_entries)
+ req->ref_entry = mcam->bmap_entries;
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
*/
if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
- ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ ((req->ref_entry == mcam->bmap_entries) &&
req->priority == NPC_MCAM_LOWER_PRIO))
return NPC_MCAM_INVALID_REQ;
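
The rvu_npc change above stops rejecting an out-of-range ref_entry and instead clamps it to bmap_entries, which the priority checks then treat as the "last entry" sentinel. A sketch of the clamp-then-validate flow with illustrative sizes:

#include <stdio.h>

#define BMAP_ENTRIES 2048
#define PRIO_HIGHER  1
#define PRIO_LOWER   2
#define INVALID_REQ  -1

static int check_ref_entry(unsigned int ref_entry, int priority)
{
        /* clamp instead of erroring out */
        if (ref_entry > BMAP_ENTRIES)
                ref_entry = BMAP_ENTRIES;

        /* ref_entry can't be 0 for high priority, or the clamped
         * maximum for low priority.
         */
        if ((!ref_entry && priority == PRIO_HIGHER) ||
            (ref_entry == BMAP_ENTRIES && priority == PRIO_LOWER))
                return INVALID_REQ;

        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               check_ref_entry(5000, PRIO_HIGHER),  /* clamped, ok */
               check_ref_entry(0, PRIO_HIGHER),     /* invalid */
               check_ref_entry(5000, PRIO_LOWER));  /* invalid */
        return 0;
}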
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index a750bd4c77a0..1ce7d67ba72e 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -2,6 +2,7 @@
/*
* Copyright 2016 Broadcom
*/
+#include <linux/align.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -11,6 +12,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/sizes.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
@@ -220,12 +222,12 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(md->base))
return PTR_ERR(md->base);
- if (res->start & 0xfff) {
+ if (!IS_ALIGNED(res->start, SZ_4K)) {
/* For backward compatibility in case the
* base address is specified with an offset.
*/
dev_info(&pdev->dev, "fix base address in dt-blob\n");
- res->start &= ~0xfff;
+ res->start = ALIGN_DOWN(res->start, SZ_4K);
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
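
The mdio-mux-bcm-iproc hunk above swaps the open-coded 0xfff masking for IS_ALIGNED()/ALIGN_DOWN(). The macros below mirror the kernel definitions for the power-of-two SZ_4K case; the base address is made up:

#include <stdio.h>

#define SZ_4K            0x1000UL
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)  /* power-of-two a */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x18002000UL + 0x23c;  /* base with a bogus offset */

        if (!IS_ALIGNED(start, SZ_4K))
                start = ALIGN_DOWN(start, SZ_4K);    /* "fix base address" path */

        printf("fixed base: 0x%lx\n", start);        /* 0x18002000 */
        return 0;
}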
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9b6cab6154e0..64ebcb6d235c 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,11 +139,6 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
-static void __of_mdiobus_unregister_callback(struct mii_bus *mdio)
-{
- of_node_put(mdio->dev.of_node);
-}
-
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -171,8 +166,6 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
* the device tree are populated after the bus has been registered */
mdio->phy_mask = ~0;
- mdio->__unregister_callback = __of_mdiobus_unregister_callback;
- of_node_get(np);
device_set_node(&mdio->dev, of_fwnode_handle(np));
/* Get bus level PHY reset GPIO details */
@@ -184,7 +177,7 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
/* Register the MDIO bus */
rc = __mdiobus_register(mdio, owner);
if (rc)
- goto put_node;
+ return rc;
/* Loop over the child nodes and register a phy_device for each phy */
for_each_available_child_of_node(np, child) {
@@ -244,9 +237,6 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
unregister:
of_node_put(child);
mdiobus_unregister(mdio);
-
-put_node:
- of_node_put(np);
return rc;
}
EXPORT_SYMBOL(__of_mdiobus_register);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4a30757c4ff8..6cf73c15635b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -787,9 +787,6 @@ void mdiobus_unregister(struct mii_bus *bus)
gpiod_set_value_cansleep(bus->reset_gpiod, 1);
device_del(&bus->dev);
-
- if (bus->__unregister_callback)
- bus->__unregister_callback(bus);
}
EXPORT_SYMBOL(mdiobus_unregister);
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index fbaaa8c102a1..840da924708b 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -533,7 +533,7 @@ ppp_async_encode(struct asyncppp *ap)
proto = get_unaligned_be16(data);
/*
- * LCP packets with code values between 1 (configure-reqest)
+ * LCP packets with code values between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
* had been negotiated.
*/
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ac22b8e28a85..6cb9d843aee9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -434,9 +434,6 @@ struct mii_bus {
/** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
-
- /** @__unregister_callback: called at the last step of unregistration */
- void (*__unregister_callback)(struct mii_bus *bus);
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
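
With the private __unregister_callback gone, the bus no longer takes its own reference on the OF node at registration, so there is nothing left to drop at unregister time and the put_node error path disappears with it. A toy userspace analogy of the two ownership schemes (types and names are illustrative, not the kernel API; in the kernel the caller keeps the node alive for the lifetime of the bus):

    /* Toy analogy, not kernel API: the old scheme takes a reference at
     * registration and drops it from a per-bus callback at unregister;
     * the new scheme just points at the node, which the caller keeps
     * alive for the bus's lifetime.
     */
    #include <stdio.h>

    struct node { int refcount; };
    struct bus {
        struct node *np;
        void (*unregister_cb)(struct bus *); /* field removed by the patch */
    };

    static void node_get(struct node *n) { n->refcount++; }
    static void node_put(struct node *n) { n->refcount--; }

    static void old_unregister_cb(struct bus *b) { node_put(b->np); }

    static void register_bus_old(struct bus *b, struct node *np)
    {
        node_get(np);
        b->np = np;
        b->unregister_cb = old_unregister_cb;
    }

    static void register_bus_new(struct bus *b, struct node *np)
    {
        b->np = np; /* borrowed reference only */
    }

    int main(void)
    {
        struct node n = { .refcount = 1 };
        struct bus a = { 0 }, b = { 0 };

        register_bus_old(&a, &n);
        a.unregister_cb(&a);      /* get at register, put at unregister */
        register_bus_new(&b, &n); /* nothing to undo on the new path */

        printf("refcount still %d\n", n.refcount); /* 1 */
        return 0;
    }
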
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 82da359bca03..d17855c52ef9 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -172,8 +172,7 @@ void fib_rules_unregister(struct fib_rules_ops *);
int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
-int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
- u32 flags);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index b62bb8525a5f..526c1e7f505e 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -12,14 +12,14 @@
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
-#ifdef CONFIG_XDP_SOCKETS
-
struct xsk_cb_desc {
void *src;
u8 off;
u8 bytes;
};
+#ifdef CONFIG_XDP_SOCKETS
+
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
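
Moving struct xsk_cb_desc above the #ifdef keeps the type visible to code built without CONFIG_XDP_SOCKETS, while only the function declarations remain gated. A generic sketch of that header pattern (FEATURE_FOO and every name below are invented for illustration):

    /* Generic sketch of the header pattern; FEATURE_FOO and all names
     * below are invented for illustration.
     */
    #include <stdio.h>

    /* Type stays outside the guard so callers compile either way. */
    struct foo_desc {
        void *src;
        unsigned char off;
        unsigned char bytes;
    };

    #ifdef FEATURE_FOO
    int foo_submit(const struct foo_desc *d); /* real version elsewhere */
    #else
    static inline int foo_submit(const struct foo_desc *d)
    {
        (void)d;
        return -1; /* feature compiled out */
    }
    #endif

    int main(void)
    {
        struct foo_desc d = { .src = NULL, .off = 0, .bytes = 8 };

        printf("%d\n", foo_submit(&d)); /* -1 without FEATURE_FOO */
        return 0;
    }
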
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 60801df9d8c0..01ba529dbb6d 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2002,6 +2002,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
* be exploited to reduce the RSS queue spread.
*/
#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
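
RXH_XFRM_NO_CHANGE needs a non-zero sentinel because 0 is itself a meaningful input_xfrm value (no transformation); 0xff marks "leave as is" in set requests, as the set_rxfh hunk further below uses it. A small sketch of the resulting "did the request change anything" test, mirroring the uapi sentinels:

    /* Sketch of the "did the request change anything" test; the
     * sentinel values mirror the uapi definitions.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffffu
    #define ETH_RSS_HASH_NO_CHANGE   0
    #define RXH_XFRM_NO_CHANGE       0xff

    static bool requests_no_change(unsigned int indir_size,
                                   unsigned int key_size,
                                   unsigned char hfunc,
                                   unsigned char input_xfrm)
    {
        return indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
               key_size == 0 &&
               hfunc == ETH_RSS_HASH_NO_CHANGE &&
               input_xfrm == RXH_XFRM_NO_CHANGE;
    }

    int main(void)
    {
        /* Clearing input_xfrm (0) counts as a change; only 0xff means
         * "leave it alone", so this request is not an empty one.
         */
        printf("%d\n", requests_no_change(ETH_RXFH_INDIR_NO_CHANGE, 0,
                                          ETH_RSS_HASH_NO_CHANGE, 0)); /* 0 */
        return 0;
    }
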
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 75282222e0b4..96622bfb838a 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -53,7 +53,7 @@ bool fib_rule_matchall(const struct fib_rule *rule)
EXPORT_SYMBOL_GPL(fib_rule_matchall);
int fib_default_rule_add(struct fib_rules_ops *ops,
- u32 pref, u32 table, u32 flags)
+ u32 pref, u32 table)
{
struct fib_rule *r;
@@ -65,7 +65,6 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->action = FR_ACT_TO_TBL;
r->pref = pref;
r->table = table;
- r->flags = flags;
r->proto = RTPROT_KERNEL;
r->fr_net = ops->fro_net;
r->uid_range = fib_kuid_range_unset;
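
Every caller passed flags == 0, so the field was only ever cleared; dropping the parameter removes the dead argument, and the call-site updates below follow mechanically. A minimal sketch of the same dead-parameter elimination (stand-in types, not the kernel structures):

    /* Minimal sketch of dead-parameter elimination; stand-in types,
     * not the kernel structures.
     */
    #include <stdio.h>

    struct rule { unsigned int pref, table, flags; };

    /* New-style helper: no flags argument; the field keeps its
     * zero-initialized value, matching what every caller passed.
     */
    static int default_rule_add(struct rule *r, unsigned int pref,
                                unsigned int table)
    {
        r->pref = pref;
        r->table = table;
        return 0;
    }

    int main(void)
    {
        struct rule r = { 0 };

        default_rule_add(&r, 0x7FFE, 254 /* RT_TABLE_MAIN */);
        printf("pref=%#x table=%u flags=%u\n", r.pref, r.table, r.flags);
        return 0;
    }
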
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 86d47425038b..4bc9a2a07bbb 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -973,32 +973,35 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
u32 cmd, void __user *useraddr)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct ethtool_rxfh_param rxfh = {};
struct ethtool_rxnfc info;
size_t info_size = sizeof(info);
int rc;
- if (!ops->set_rxnfc || !ops->get_rxfh)
+ if (!ops->set_rxnfc)
return -EOPNOTSUPP;
rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
if (rc)
return rc;
- rc = ops->get_rxfh(dev, &rxfh);
- if (rc)
- return rc;
+ if (ops->get_rxfh) {
+ struct ethtool_rxfh_param rxfh = {};
- /* Sanity check: if symmetric-xor is set, then:
- * 1 - no other fields besides IP src/dst and/or L4 src/dst
- * 2 - If src is set, dst must also be set
- */
- if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
- ((info.data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3)) ||
- (!!(info.data & RXH_IP_SRC) ^ !!(info.data & RXH_IP_DST)) ||
- (!!(info.data & RXH_L4_B_0_1) ^ !!(info.data & RXH_L4_B_2_3))))
- return -EINVAL;
+ rc = ops->get_rxfh(dev, &rxfh);
+ if (rc)
+ return rc;
+
+ /* Sanity check: if symmetric-xor is set, then:
+ * 1 - no other fields besides IP src/dst and/or L4 src/dst
+ * 2 - If src is set, dst must also be set
+ */
+ if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
+ ((info.data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) ||
+ (!!(info.data & RXH_IP_SRC) ^ !!(info.data & RXH_IP_DST)) ||
+ (!!(info.data & RXH_L4_B_0_1) ^ !!(info.data & RXH_L4_B_2_3))))
+ return -EINVAL;
+ }
rc = ops->set_rxnfc(dev, &info);
if (rc)
@@ -1252,6 +1255,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
&rxfh_dev.hfunc, sizeof(rxfh.hfunc))) {
ret = -EFAULT;
} else if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh, input_xfrm),
+ &rxfh_dev.input_xfrm,
+ sizeof(rxfh.input_xfrm))) {
+ ret = -EFAULT;
+ } else if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh, rss_config[0]),
rss_config, total_size)) {
ret = -EFAULT;
@@ -1299,14 +1307,16 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
/* If either indir, hash key or function is valid, proceed further.
- * Must request at least one change: indir size, hash key or function.
+ * Must request at least one change: indir size, hash key, function
+ * or input transformation.
*/
if ((rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.indir_size != dev_indir_size) ||
(rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
- rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE))
+ rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE &&
+ rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
return -EINVAL;
if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
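
The first ioctl.c hunk above stops failing the whole request with EOPNOTSUPP when a driver implements set_rxnfc but not get_rxfh: the symmetric-hash sanity check is simply skipped when there is no RSS state to read. A sketch of that optional-op guard, showing only the field-mask part of the check (types, constants and error values are stand-ins for the ethtool structures):

    /* Sketch of the optional-op guard; types, constants and error
     * values are stand-ins, and only the field-mask check is shown.
     */
    #include <stdio.h>

    #define SYM_XOR      0x1    /* stand-in for RXH_XFRM_SYM_XOR */
    #define ALLOWED_MASK 0xf0UL /* IP src/dst + L4 src/dst bits  */

    struct ops {
        int (*set_rxnfc)(unsigned long fields);
        int (*get_rxfh)(unsigned int *input_xfrm); /* optional */
    };

    static int set_rxnfc(const struct ops *ops, unsigned long fields)
    {
        if (!ops->set_rxnfc)
            return -95; /* -EOPNOTSUPP: only the mandatory op matters */

        if (ops->get_rxfh) { /* cross-check only when it can be read */
            unsigned int xfrm = 0;
            int rc = ops->get_rxfh(&xfrm);

            if (rc)
                return rc;
            if ((xfrm & SYM_XOR) && (fields & ~ALLOWED_MASK))
                return -22; /* -EINVAL: not symmetric-hash safe */
        }
        return ops->set_rxnfc(fields);
    }

    static int my_set(unsigned long fields) { (void)fields; return 0; }

    int main(void)
    {
        struct ops o = { .set_rxnfc = my_set, .get_rxfh = NULL };

        /* Previously this returned -EOPNOTSUPP; now it proceeds. */
        printf("%d\n", set_rxnfc(&o, 0x30)); /* 0 */
        return 0;
    }
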
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 513f475c6a53..5bdd1c016009 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -395,13 +395,13 @@ static int fib_default_rules_init(struct fib_rules_ops *ops)
{
int err;
- err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
+ err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL);
if (err < 0)
return err;
- err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
+ err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN);
if (err < 0)
return err;
- err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
+ err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT);
if (err < 0)
return err;
return 0;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 0063a237253b..9d6f59531b3a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
goto err1;
}
- err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
+ err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT);
if (err < 0)
goto err2;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 7c2003833010..7523c4baef35 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -475,11 +475,11 @@ static int __net_init fib6_rules_net_init(struct net *net)
if (IS_ERR(ops))
return PTR_ERR(ops);
- err = fib_default_rule_add(ops, 0, RT6_TABLE_LOCAL, 0);
+ err = fib_default_rule_add(ops, 0, RT6_TABLE_LOCAL);
if (err)
goto out_fib6_rules_ops;
- err = fib_default_rule_add(ops, 0x7FFE, RT6_TABLE_MAIN, 0);
+ err = fib_default_rule_add(ops, 0x7FFE, RT6_TABLE_MAIN);
if (err)
goto out_fib6_rules_ops;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 30ca064b76ef..9782c180fee6 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -242,7 +242,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
goto err1;
}
- err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
+ err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT);
if (err < 0)
goto err2;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3c50b4037755..adf5de1ff773 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2735,6 +2735,7 @@ errout:
}
static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
+ [TCA_CHAIN] = { .type = NLA_U32 },
[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};
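
Adding the TCA_CHAIN entry lets the netlink core validate the attribute's payload as a u32 before the dump handler runs, instead of accepting it unchecked. A toy illustration of what an NLA_U32 policy entry enforces (the values and types are invented, not the kernel's struct nla_policy):

    /* Toy illustration of an NLA_U32-style policy entry; values and
     * types are invented, not the kernel's struct nla_policy.
     */
    #include <stddef.h>
    #include <stdio.h>

    enum { ATTR_CHAIN = 1, ATTR_MAX = 2 };
    enum { TYPE_UNSPEC, TYPE_U32 };

    struct policy { int type; };

    static const struct policy dump_policy[ATTR_MAX + 1] = {
        [ATTR_CHAIN] = { .type = TYPE_U32 },
    };

    /* The core rejects a malformed attribute before the handler runs. */
    static int validate(int attr, size_t payload_len)
    {
        if (dump_policy[attr].type == TYPE_U32 && payload_len != 4)
            return -22; /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", validate(ATTR_CHAIN, 4)); /* 0   */
        printf("%d\n", validate(ATTR_CHAIN, 2)); /* -22 */
        return 0;
    }
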
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 175d3d1d773b..f10879788f61 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Check that route PMTU values match expectations, and that initial device MTU
@@ -198,7 +198,7 @@
# - pmtu_ipv6_route_change
# Same as above but with IPv6
-source ./lib.sh
+source lib.sh
PAUSE_ON_FAIL=no
VERBOSE=0
diff --git a/tools/testing/selftests/net/unicast_extensions.sh b/tools/testing/selftests/net/unicast_extensions.sh
index b7a2cb9e7477..f52aa5f7da52 100755
--- a/tools/testing/selftests/net/unicast_extensions.sh
+++ b/tools/testing/selftests/net/unicast_extensions.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# By Seth Schoen (c) 2021, for the IPv4 Unicast Extensions Project
@@ -28,7 +28,7 @@
# These tests provide an easy way to flip the expected result of any
# of these behaviors for testing kernel patches that change them.
-source ./lib.sh
+source lib.sh
# nettest can be run from PATH or from same directory as this selftest
if ! which nettest >/dev/null; then