-rw-r--r--  Documentation/networking/page_pool.rst | 18
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  arch/powerpc/platforms/8xx/adder875.c | 1
-rw-r--r--  arch/powerpc/platforms/8xx/mpc885ads_setup.c | 1
-rw-r--r--  arch/powerpc/platforms/8xx/tqm8xx_setup.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/net/dsa/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 26
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 644
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 19
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 205
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 360
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 29
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mae.c | 827
-rw-r--r--  drivers/net/ethernet/sfc/mae.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 14
-rw-r--r--  drivers/net/ethernet/sfc/tc.c | 533
-rw-r--r--  drivers/net/ethernet/sfc/tc.h | 86
-rw-r--r--  drivers/net/ethernet/sfc/tc_conntrack.c | 533
-rw-r--r--  drivers/net/ethernet/sfc/tc_conntrack.h | 55
-rw-r--r--  drivers/net/ethernet/sfc/tc_counters.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/tc_counters.h | 4
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 12
-rw-r--r--  drivers/net/team/team.c | 62
-rw-r--r--  drivers/net/team/team_mode_activebackup.c | 8
-rw-r--r--  drivers/net/team/team_mode_broadcast.c | 1
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c | 50
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c | 1
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 2
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.c | 4
-rw-r--r--  include/linux/fs_enet_pd.h | 165
-rw-r--r--  include/linux/if_team.h | 4
-rw-r--r--  include/linux/mlx5/driver.h | 7
-rw-r--r--  include/linux/phy.h | 4
-rw-r--r--  include/net/devlink.h | 3
-rw-r--r--  include/net/fq.h | 5
-rw-r--r--  include/net/page_pool/helpers.h | 24
-rw-r--r--  include/net/switchdev.h | 4
-rw-r--r--  net/devlink/leftover.c | 4
-rw-r--r--  net/dsa/port.c | 12
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 5
-rw-r--r--  net/tipc/link.h | 2
-rw-r--r--  net/tls/tls_sw.c | 13
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 8
98 files changed, 2879 insertions, 1451 deletions
diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 68b82cea13e4..215ebc92752c 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -4,22 +4,8 @@
Page Pool API
=============
-The page_pool allocator is optimized for the XDP mode that uses one frame
-per-page, but it can fallback on the regular page allocator APIs.
-
-Basic use involves replacing alloc_pages() calls with the
-page_pool_alloc_pages() call. Drivers should use page_pool_dev_alloc_pages()
-replacing dev_alloc_pages().
-
-API keeps track of in-flight pages, in order to let API user know
-when it is safe to free a page_pool object. Thus, API users
-must call page_pool_put_page() to free the page, or attach
-the page to a page_pool-aware objects like skbs marked with
-skb_mark_for_recycle().
-
-API user must call page_pool_put_page() once on a page, as it
-will either recycle the page, or in case of refcnt > 1, it will
-release the DMA mapping and in-flight state accounting.
+.. kernel-doc:: include/net/page_pool/helpers.h
+ :doc: page_pool allocator
Architecture overview
=====================
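
For orientation, the API that the removed paragraphs described (now generated
from kernel-doc comments in include/net/page_pool/helpers.h) follows this
general shape. A minimal sketch of a page_pool-backed RX path; the function
and variable names below are illustrative, not from this patch:

	#include <net/page_pool/helpers.h>
	#include <linux/skbuff.h>

	static int example_rx(struct page_pool *pool, struct sk_buff *skb,
			      unsigned int len)
	{
		struct page *page;

		/* page_pool_dev_alloc_pages() replaces dev_alloc_pages()
		 * in page_pool-converted drivers.
		 */
		page = page_pool_dev_alloc_pages(pool);
		if (!page)
			return -ENOMEM;

		/* Attach the page to the skb and let the recycling path
		 * return it to the pool when the skb is freed ...
		 */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				len, PAGE_SIZE);
		skb_mark_for_recycle(skb);

		/* ... or, if the page is not consumed, hand it back
		 * directly: page_pool_put_page(pool, page, -1, true);
		 */
		return 0;
	}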
diff --git a/MAINTAINERS b/MAINTAINERS
index 08bcf3a7c482..6efcd8713682 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8371,7 +8371,6 @@ L: [email protected]
S: Maintained
F: drivers/net/ethernet/freescale/fs_enet/
-F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
M: Shengjiu Wang <[email protected]>
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 7e83eb6746f4..f6bd232f8323 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -7,7 +7,6 @@
*/
#include <linux/init.h>
-#include <linux/fs_enet_pd.h>
#include <linux/of_platform.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 2fc7cacbcd96..c7c4f082b838 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 7d8eb50bb9cd..6e56be852b2c 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -24,7 +24,6 @@
#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 68709743450e..c11771542bec 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -23,7 +23,6 @@
#include <linux/phy.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
-#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
#include <linux/reboot.h>
@@ -37,8 +36,6 @@
#include <asm/cpm2.h>
#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
-extern void init_fcc_ioports(struct fs_platform_info*);
-extern void init_fec_ioports(struct fs_platform_info*);
extern void init_smc_ioports(struct fs_uart_platform_info*);
static phys_addr_t immrbase = -1;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index efc9e4a6df04..9773d2a3d97f 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -993,7 +993,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index db5fb196c728..8ba53edf2311 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1002,7 +1002,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
+ err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
if (err < 0)
return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f0b394ed7452..3c25b9045f9d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3685,7 +3685,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
+ dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
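
Taken together, the three mlx5 hunks above are a pure rename at the call
sites: mlx5_vector2eqn() becomes mlx5_comp_eqn_get() and
mlx5_comp_vectors_count() becomes mlx5_comp_vectors_max(). A caller-side
sketch with types abbreviated; the authoritative prototypes live in
include/linux/mlx5/driver.h, which this series also touches:

	int eqn, err;
	unsigned int ncomp;

	/* was: mlx5_comp_vectors_count(mdev) */
	ncomp = mlx5_comp_vectors_max(mdev);

	/* was: mlx5_vector2eqn(mdev, vector, &eqn) */
	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	if (err)
		return err;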
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 3ed5391bb18d..f8c1d73b251d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,7 +37,6 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
- select MEDIATEK_GE_PHY
imply NET_DSA_MT7530_MDIO
imply NET_DSA_MT7530_MMIO
help
@@ -49,6 +48,7 @@ config NET_DSA_MT7530
config NET_DSA_MT7530_MDIO
tristate "MediaTek MT7530 MDIO interface driver"
depends on NET_DSA_MT7530
+ imply MEDIATEK_GE_PHY
select PCS_MTK_LYNXI
help
This enables support for the MediaTek MT7530 and MT7531 switch
@@ -60,6 +60,7 @@ config NET_DSA_MT7530_MMIO
tristate "MediaTek MT7530 MMIO interface driver"
depends on NET_DSA_MT7530
depends on HAS_IOMEM
+ imply MEDIATEK_GE_SOC_PHY
help
This enables support for the built-in Ethernet switch found
in the MediaTek MT7988 SoC.
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index eb35ced1c8ba..d63d321f3e7b 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -640,7 +640,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
* If no more open filters return NULL
*/
struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
- int loc, bool wake_filter,
+ u32 loc, bool wake_filter,
bool init)
{
struct bcmasp_net_filter *nfilter = NULL;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 6bfcaa7f95a8..5b512f7f5e94 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -566,7 +566,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf);
void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en);
struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
- int loc, bool wake_filter,
+ u32 loc, bool wake_filter,
bool init);
bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 2cf96892e565..a741070f1f9a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1940,7 +1940,6 @@ static struct platform_driver bcm63xx_enet_driver = {
.remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
- .owner = THIS_MODULE,
},
};
@@ -2761,7 +2760,6 @@ static struct platform_driver bcm63xx_enetsw_driver = {
.remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
- .owner = THIS_MODULE,
},
};
@@ -2791,7 +2789,6 @@ struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
.driver = {
.name = "bcm63xx_enet_shared",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index eb168ca983b7..7be917a8da48 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -13132,9 +13132,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 31f85f3e2364..63e067038385 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,7 +98,6 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
@@ -129,11 +128,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
- data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
- memcpy(data, &cos2bw.cfg, sizeof(cos2bw) - 4);
if (qidx == 0) {
req->queue_id0 = cos2bw.queue_id;
- req->unused_0 = 0;
+ req->queue_id0_min_bw = cos2bw.min_bw;
+ req->queue_id0_max_bw = cos2bw.max_bw;
+ req->queue_id0_tsa_assign = cos2bw.tsa;
+ req->queue_id0_pri_lvl = cos2bw.pri_lvl;
+ req->queue_id0_bw_weight = cos2bw.bw_weight;
+ } else {
+ memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg));
}
}
return hwrm_req_send(bp, req);
@@ -144,7 +147,6 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
struct hwrm_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
@@ -158,13 +160,19 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
return rc;
}
- data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
+ for (i = 0; i < bp->max_tc; i++) {
int tc;
- memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
- if (i == 0)
+ if (i == 0) {
cos2bw.queue_id = resp->queue_id0;
+ cos2bw.min_bw = resp->queue_id0_min_bw;
+ cos2bw.max_bw = resp->queue_id0_max_bw;
+ cos2bw.tsa = resp->queue_id0_tsa_assign;
+ cos2bw.pri_lvl = resp->queue_id0_pri_lvl;
+ cos2bw.bw_weight = resp->queue_id0_bw_weight;
+ } else {
+ memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg));
+ }
tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
if (tc < 0)
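
The two bnxt_dcb.c hunks above replace open-coded pointer arithmetic
(stepping a void * across adjacent queue_idN field groups) with indexed
access into the cfg[] array of packed structs introduced in bnxt_hsi.h
below. A standalone sketch of the pattern; the struct names and layout
here are hypothetical, not the bnxt ABI:

	#include <stdint.h>
	#include <string.h>

	struct qcfg {
		uint8_t  queue_id;
		uint32_t min_bw;
		uint32_t max_bw;
		uint8_t  tsa;
		uint8_t  pri_lvl;
		uint8_t  bw_weight;
	} __attribute__((packed));

	struct req {
		uint8_t     queue_id0;	/* queue 0 fields stay discrete */
		struct qcfg cfg[7];	/* queues 1..7 become an array */
	} __attribute__((packed));

	static void set_queue(struct req *req, int qidx,
			      const struct qcfg *src)
	{
		/* Array indexing keeps the destination type-checked and
		 * bounds-visible, unlike the old
		 * `&req->unused_0 + qidx * (sizeof(*src) - 4)` arithmetic.
		 */
		memcpy(&req->cfg[qidx - 1], src, sizeof(*src));
	}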
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b31de4cf6534..f178ed9899a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -5739,286 +5739,48 @@ struct hwrm_queue_cos2bw_qcfg_output {
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
u8 unused_2[4];
u8 valid;
};
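
A consolidation like the cfg[7] array above is only safe if the packed
array is byte-for-byte identical to the eight discrete field groups it
replaces, which can be pinned down at compile time. An illustrative check;
the shadow struct and the 12-byte size below follow the fields visible in
this hunk but are not asserted against the real bnxt_hsi layout:

	#include <stddef.h>

	struct entry {
		unsigned char queue_id;
		unsigned int  min_bw;		/* __le32 on the wire */
		unsigned int  max_bw;		/* __le32 on the wire */
		unsigned char tsa_assign;
		unsigned char pri_lvl;
		unsigned char bw_weight;
	} __attribute__((packed));

	/* Each packed entry occupies 12 bytes, so cfg[i] lands exactly
	 * where the old queue_id<i+1>_* fields used to sit.
	 */
	_Static_assert(sizeof(struct entry) == 12, "wire layout changed");
	_Static_assert(offsetof(struct entry, max_bw) == 5, "field shifted");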
@@ -6082,286 +5844,48 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
u8 unused_1[5];
};
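[Editor's note] The eight near-identical per-queue field blocks above collapse into one packed array, so callers can index the COS-queue bandwidth configuration instead of open-coding a copy per queue ID. A minimal caller-side sketch, assuming a request struct containing the new cfg[] array and a hypothetical cos2bw[] source table:

	/* hedged sketch: program min/max BW per COS queue via the new array */
	int i;

	for (i = 0; i < ARRAY_SIZE(req->cfg); i++) {
		req->cfg[i].queue_id = cos2bw[i].queue_id;	/* hypothetical source */
		req->cfg[i].queue_id_min_bw = cpu_to_le32(cos2bw[i].min_bw);
		req->cfg[i].queue_id_max_bw = cpu_to_le32(cos2bw[i].max_bw);
		req->cfg[i].queue_id_tsa_assign =
			QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS;
		req->cfg[i].queue_id_bw_weight = cos2bw[i].bw_weight;
	}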
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0616b5fe241c..ad862ed7888a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4986,9 +4986,6 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
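[Editor's note] The deleted nla_len() check is not lost: the same validation appears to have moved into the core rtnetlink bridge-setlink path (outside this diff), so individual drivers no longer need to re-check the IFLA_BRIDGE_MODE length. A hedged sketch of what such centralized validation looks like:

	/* hedged sketch of core-side validation before driver dispatch */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		if (nla_len(attr) < sizeof(u16))
			return -EINVAL;
		break;
	}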
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index f9f5b28cc72e..a6dfc8807d3d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -318,14 +318,12 @@ fs_enet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct fs_enet_private *fep;
- const struct fs_platform_info *fpi;
u32 int_events;
u32 int_clr_events;
int nr, napi_ok;
int handled;
fep = netdev_priv(dev);
- fpi = fep->fpi;
nr = 0;
while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index cb419aef8d1b..d371072fff60 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -2,6 +2,7 @@
#ifndef FS_ENET_H
#define FS_ENET_H
+#include <linux/clk.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
@@ -9,7 +10,6 @@
#include <linux/phy.h>
#include <linux/dma-mapping.h>
-#include <linux/fs_enet_pd.h>
#include <asm/fs_pd.h>
#ifdef CONFIG_CPM1
@@ -118,6 +118,23 @@ struct phy_info {
#define ENET_RX_ALIGN 16
#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
+struct fs_platform_info {
+ /* device specific information */
+ u32 cp_command; /* CPM page/sblock/mcn */
+
+ u32 dpram_offset;
+
+ struct device_node *phy_node;
+
+ int rx_ring, tx_ring; /* number of rx/tx buffer descriptors */
+ int rx_copybreak; /* copy small frames below this limit */
+ int napi_weight; /* NAPI weight */
+
+ int use_rmii; /* use RMII mode */
+
+ struct clk *clk_per; /* 'per' clock for register access */
+};
+
struct fs_enet_private {
struct napi_struct napi;
struct device *dev; /* pointer back to the device (must be initialized first) */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 925428f1b0c8..d903a9012db0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
- fpi->dpram_offset = cpm_dpalloc(128, 32);
+ fpi->dpram_offset = cpm_muram_alloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
@@ -547,7 +547,7 @@ static void tx_restart(struct net_device *dev)
}
/* Now update the TBPTR and dirty flag to the current buffer */
W32(ep, fen_genfcc.fcc_tbptr,
- (uint) (((void *)recheck_bd - fep->ring_base) +
+ (uint)(((void __iomem *)recheck_bd - fep->ring_base) +
fep->ring_mem_addr));
fep->dirty_tx = recheck_bd;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index f609dc112458..cdc89d83cf07 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -339,11 +339,7 @@ static void restart(struct net_device *dev)
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
-
- struct fec_info *feci = dev->phydev->mdio.bus->priv;
-
int i;
if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -363,16 +359,6 @@ static void stop(struct net_device *dev)
FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
fs_cleanup_bds(dev);
-
- /* shut down FEC1? that's where the mii bus is */
- if (fpi->has_phy) {
- FS(fecp, r_cntrl, fpi->use_rmii ?
- FEC_RCNTRL_RMII_MODE :
- FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
- FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
- FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, feci->mii_speed);
- }
}
static void napi_clear_event_fs(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 66d40da5cde0..a64cb6270515 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -133,13 +133,13 @@ static int allocate_bd(struct net_device *dev)
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
+ fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
if (IS_ERR_VALUE(fep->ring_mem_addr))
return -ENOMEM;
fep->ring_base = (void __iomem __force*)
- cpm_dpram_addr(fep->ring_mem_addr);
+ cpm_muram_addr(fep->ring_mem_addr);
return 0;
}
@@ -149,7 +149,7 @@ static void free_bd(struct net_device *dev)
struct fs_enet_private *fep = netdev_priv(dev);
if (fep->ring_base)
- cpm_dpfree(fep->ring_mem_addr);
+ cpm_muram_free(fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 91a69fc2f7c2..f965a2329055 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -29,8 +29,8 @@
struct bb_info {
struct mdiobb_ctrl ctrl;
- __be32 __iomem *dir;
- __be32 __iomem *dat;
+ u32 __iomem *dir;
+ u32 __iomem *dat;
u32 mdio_msk;
u32 mdc_msk;
};
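[Editor's note] Dropping the __be32 annotation matches the powerpc MMIO accessors, which take plain u32 __iomem pointers and do the byte swapping internally. A representative helper, assuming the in_be32()/out_be32() accessors this driver already builds on:

	/* read-modify-write of a big-endian MMIO register; the swap happens
	 * inside the accessor, so the pointer type stays u32 __iomem *
	 */
	static inline void bb_set(u32 __iomem *p, u32 m)
	{
		out_be32(p, in_be32(p) | m);
	}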
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 1910df250c33..a1e777a4b75f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -31,6 +31,7 @@
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index e3bb05959ba9..edf0bcf76ac9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -422,7 +422,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
struct mac_params *mac_param);
int hns_mac_init(struct dsaf_device *dsaf_dev);
-void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index fe845987d99a..3eeee224f1fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -18,7 +18,6 @@
/* adminq functions */
int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
-void i40e_adminq_init_ring_data(struct i40e_hw *hw);
int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
@@ -51,7 +50,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
@@ -117,9 +115,6 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
@@ -269,9 +264,6 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
@@ -350,7 +342,6 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
-int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
int i40e_init_nvm(struct i40e_hw *hw);
@@ -425,14 +416,6 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct virtchnl_vf_resource *msg);
-int i40e_vf_reset(struct i40e_hw *hw);
-int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- int v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index c0ad34b42531..29f7a9852aec 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1404,6 +1404,7 @@ struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
u8 rsvd[9];
};
@@ -1793,11 +1794,10 @@ struct ice_aqc_lldp_filter_ctrl {
u8 reserved2[12];
};
+#define ICE_AQC_RSS_VSI_VALID BIT(15)
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
-#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
__le16 vsi_id;
u8 reserved[6];
__le32 addr_high;
@@ -1815,35 +1815,33 @@ struct ice_aqc_get_set_rss_keys {
u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
};
-/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
-#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
- __le16 vsi_id;
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
-
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+enum ice_lut_type {
+ ICE_LUT_VSI = 0,
+ ICE_LUT_PF = 1,
+ ICE_LUT_GLOBAL = 2,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+enum ice_lut_size {
+ ICE_LUT_VSI_SIZE = 64,
+ ICE_LUT_GLOBAL_SIZE = 512,
+ ICE_LUT_PF_SIZE = 2048,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+/* enum ice_aqc_lut_flags combines the constants used to fill
+ * &ice_aqc_get_set_rss_lut::flags, which is an amalgamation of the global
+ * LUT ID, the LUT size and the LUT type, the last of which needs neither
+ * a shift nor a mask.
+ */
+enum ice_aqc_lut_flags {
+ ICE_AQC_LUT_SIZE_SMALL = 0, /* size = 64 or 128 */
+ ICE_AQC_LUT_SIZE_512 = BIT(2),
+ ICE_AQC_LUT_SIZE_2K = BIT(3),
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
- (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+ ICE_AQC_LUT_GLOBAL_IDX = GENMASK(7, 4),
+};
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+ __le16 vsi_id;
__le16 flags;
__le32 reserved;
__le32 addr_high;
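[Editor's note] With the new enums the 16-bit flags word is assembled by OR-ing type, size and (for global LUTs) index together, replacing the old shift/mask pairs. A sketch mirroring what __ice_aq_get_set_rss_lut() does further down in this patch:

	/* sketch: build flags for a 512-entry global LUT at index idx */
	enum ice_aqc_lut_flags flags;

	flags = ICE_LUT_GLOBAL |	/* type needs neither shift nor mask */
		ICE_AQC_LUT_SIZE_512 |
		FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, idx);
	desc_params->flags = cpu_to_le16(flags);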
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index dade0a50299c..a86255b529a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5,6 +5,7 @@
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
+#include "ice_ptp_hw.h"
#define ICE_PF_RESET_WAIT_COUNT 300
@@ -2662,6 +2663,67 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_aq_get_netlist_node - get a node handle and part number from the netlist
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ */
+static int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return -EIO;
+
+ if (node_handle)
+ *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ice_is_pf_c827 - check if the PF contains a C827 PHY
+ * @hw: pointer to the hw struct
+ */
+bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -3877,6 +3939,34 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return status;
}
+static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
+{
+ switch (type) {
+ case ICE_LUT_VSI:
+ return ICE_LUT_VSI_SIZE;
+ case ICE_LUT_GLOBAL:
+ return ICE_LUT_GLOBAL_SIZE;
+ case ICE_LUT_PF:
+ return ICE_LUT_PF_SIZE;
+ }
+ WARN_ONCE(1, "incorrect type passed");
+ return ICE_LUT_VSI_SIZE;
+}
+
+static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
+{
+ switch (size) {
+ case ICE_LUT_VSI_SIZE:
+ return ICE_AQC_LUT_SIZE_SMALL;
+ case ICE_LUT_GLOBAL_SIZE:
+ return ICE_AQC_LUT_SIZE_512;
+ case ICE_LUT_PF_SIZE:
+ return ICE_AQC_LUT_SIZE_2K;
+ }
+ WARN_ONCE(1, "incorrect size passed");
+ return 0;
+}
+
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@@ -3886,95 +3976,44 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
static int
-__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
-{
- u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
- struct ice_aqc_get_set_rss_lut *cmd_resp;
+__ice_aq_get_set_rss_lut(struct ice_hw *hw,
+ struct ice_aq_get_set_rss_lut_params *params, bool set)
+{
+ u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
+ enum ice_lut_type lut_type = params->lut_type;
+ struct ice_aqc_get_set_rss_lut *desc_params;
+ enum ice_aqc_lut_flags flags;
+ enum ice_lut_size lut_size;
struct ice_aq_desc desc;
- int status;
- u8 *lut;
+ u8 *lut = params->lut;
- if (!params)
- return -EINVAL;
- vsi_handle = params->vsi_handle;
- lut = params->lut;
-
- if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- lut_size = params->lut_size;
- lut_type = params->lut_type;
- glob_lut_idx = params->global_lut_id;
- vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
- cmd_resp = &desc.params.get_set_rss_lut;
+ lut_size = ice_lut_type_to_size(lut_type);
+ if (lut_size > params->lut_size)
+ return -EINVAL;
+ else if (set && lut_size != params->lut_size)
+ return -EINVAL;
- if (set) {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ if (set)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- } else {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
- }
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
- ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
- ICE_AQC_GSET_RSS_LUT_VSI_VALID);
-
- switch (lut_type) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
- flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
- break;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
-
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
- flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
- ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
-
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else {
- goto ice_aq_get_set_rss_lut_send;
- }
+ desc_params = &desc.params.get_set_rss_lut;
+ vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
- /* LUT size is only valid for Global and PF table types */
- switch (lut_size) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- }
- fallthrough;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
+ if (lut_type == ICE_LUT_GLOBAL)
+ glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
+ params->global_lut_id);
-ice_aq_get_set_rss_lut_send:
- cmd_resp->flags = cpu_to_le16(flags);
- status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+ flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
+ desc_params->flags = cpu_to_le16(flags);
-ice_aq_get_set_rss_lut_exit:
- return status;
+ return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}
/**
@@ -4016,12 +4055,10 @@ static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *key, bool set)
{
- struct ice_aqc_get_set_rss_key *cmd_resp;
+ struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
struct ice_aq_desc desc;
- cmd_resp = &desc.params.get_set_rss_key;
-
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -4029,10 +4066,8 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
- ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
- ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+ desc_params = &desc.params.get_set_rss_key;
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}
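[Editor's note] A caller-side sketch of the reordered parameter struct, assuming a PF-wide LUT. Note the asymmetric size check introduced above: a get only needs the caller's buffer to be at least as large as the type's table, while a set requires an exact match:

	/* hedged usage sketch for the refactored LUT parameters */
	struct ice_aq_get_set_rss_lut_params params = {
		.lut = lut_buf,			/* caller-provided buffer */
		.lut_size = ICE_LUT_PF_SIZE,	/* 2048 entries */
		.lut_type = ICE_LUT_PF,
		.vsi_handle = vsi->idx,
	};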
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index df12a9d8d28c..71b82cdf4a6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -93,6 +93,7 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+bool ice_is_pf_c827(struct ice_hw *hw);
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index cc7357ed6e5f..67bfd1f61cdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -20,8 +20,23 @@ static const struct rhashtable_params ice_fdb_ht_params = {
static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
- /* Accept only PF netdev and PRs */
- return ice_is_port_repr_netdev(dev) || netif_is_ice(dev);
+ /* Accept only PF netdev, PRs and LAG */
+ return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
+ netif_is_lag_master(dev);
+}
+
+static struct net_device *
+ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, lower, iter) {
+ if (netif_is_ice(lower))
+ return lower;
+ }
+
+ return NULL;
}
static struct ice_esw_br_port *
@@ -31,8 +46,19 @@ ice_eswitch_br_netdev_to_port(struct net_device *dev)
struct ice_repr *repr = ice_netdev_to_repr(dev);
return repr->br_port;
- } else if (netif_is_ice(dev)) {
- struct ice_pf *pf = ice_netdev_to_pf(dev);
+ } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return NULL;
+
+ pf = ice_netdev_to_pf(ice_dev);
return pf->br_port;
}
@@ -1085,7 +1111,18 @@ ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
trace_ice_eswitch_br_port_link(repr->br_port);
} else {
- struct ice_pf *pf = ice_netdev_to_pf(dev);
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return 0;
+
+ pf = ice_netdev_to_pf(ice_dev);
err = ice_eswitch_br_uplink_port_init(bridge, pf);
trace_ice_eswitch_br_port_link(pf->br_port);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a92dc9a16035..531cc2194741 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -335,6 +335,8 @@
#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define GL_MNG_FWSM 0x000B6134
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -489,7 +491,6 @@
#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
-#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
#define PFPM_APM_APME_M BIT(0)
#define PFPM_WUFC 0x0009DC00
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 077f2e91ae1a..927518fcad51 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -907,6 +907,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
struct ice_hw_common_caps *cap;
struct ice_pf *pf = vsi->back;
+ u16 max_rss_size;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1;
@@ -914,32 +915,31 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
}
cap = &pf->hw.func_caps.common_cap;
+ max_rss_size = BIT(cap->rss_table_entry_width);
switch (vsi->type) {
case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
if (vsi->type == ICE_VSI_CHNL)
- vsi->rss_size = min_t(u16, vsi->num_rxq,
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
*/
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_LB:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2e80d5cd9f56..0f04347eda39 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4520,6 +4520,31 @@ static void ice_deinit_eth(struct ice_pf *pf)
ice_decfg_netdev(vsi);
}
+/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware is not yet loaded; keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
static int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -4532,6 +4557,18 @@ static int ice_init_dev(struct ice_pf *pf)
return err;
}
+ /* Some cards require longer initialization times
* because they need to load FW from an external source.
* This can take as long as half a minute.
*/
+ if (ice_is_pf_c827(hw)) {
+ err = ice_wait_for_fw(hw, 30000);
+ if (err) {
dev_err(dev, "ice_wait_for_fw timed out\n");
+ return err;
+ }
+ }
+
ice_init_feature_support(pf);
ice_request_fw(pf);
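[Editor's note] As a design alternative, the hand-rolled loop in ice_wait_for_fw() could equivalently be expressed with the generic read_poll_timeout() helper from <linux/iopoll.h>; a hedged sketch:

	/* hedged alternative: poll GL_MNG_FWSM every 100 ms for up to 30 s */
	u32 reg;
	int err;

	err = read_poll_timeout(rd32, reg, !(reg & GL_MNG_FWSM_FW_LOADING_M),
				100 * USEC_PER_MSEC, 30 * USEC_PER_SEC, false,
				hw, GL_MNG_FWSM);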
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 6a9364761165..f6f27361c3cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -287,6 +287,7 @@ struct ice_nvgre_hdr {
* M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100)
* N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100)
*/
+#define ICE_PKT_FROM_NETWORK BIT(3)
#define ICE_PKT_VLAN_STAG BIT(12)
#define ICE_PKT_VLAN_ITAG BIT(13)
#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15))
@@ -392,10 +393,10 @@ enum ice_hw_metadata_offset {
};
enum ice_pkt_flags {
- ICE_PKT_FLAGS_VLAN = 0,
- ICE_PKT_FLAGS_TUNNEL = 1,
- ICE_PKT_FLAGS_TCP = 2,
- ICE_PKT_FLAGS_ERROR = 3,
+ ICE_PKT_FLAGS_MDID20 = 0,
+ ICE_PKT_FLAGS_MDID21 = 1,
+ ICE_PKT_FLAGS_MDID22 = 2,
+ ICE_PKT_FLAGS_MDID23 = 3,
};
struct ice_hw_metadata {
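[Editor's note] The rename reflects that each 16-bit flags word maps one-to-one onto a hardware metadata ID (MDID 20-23) rather than onto a single semantic field, so several match bits can share one word, as the ice_switch.c hunk below does with |=. A sketch:

	/* sketch: MDID20's word carries both direction and VLAN match bits */
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_FROM_NETWORK);
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_VLAN_MASK);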
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 3b68cb91bd81..1969425f0084 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -112,6 +112,9 @@ struct ice_cgu_pll_params_e822 {
extern const struct
ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+#define E810C_QSFP_C827_0_HANDLE 2
+#define E810C_QSFP_C827_1_HANDLE 3
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 91bc92f5059b..a7afb612fe32 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -6058,14 +6058,21 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
+ cpu_to_be16(ICE_PKT_FROM_NETWORK);
+}
+
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
cpu_to_be16(ICE_PKT_VLAN_MASK);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 250823ac173a..0bd4320e39df 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -359,6 +359,7 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id);
/* Switch/bridge related commands */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 38547db1ec4e..37b54db91df2 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -7,6 +7,8 @@
#include "ice_lib.h"
#include "ice_protocol_type.h"
+#define ICE_TC_METADATA_LKUP_IDX 0
+
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
@@ -19,7 +21,13 @@ static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
struct ice_tc_flower_fltr *fltr)
{
- int lkups_cnt = 0;
+ int lkups_cnt = 1; /* 0th lookup is metadata */
+
+ /* Always add metadata as the 0th lookup. Included elements:
+ * - Direction flag (always present)
+ * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
+ * - Tunnel flag (present if tunnel)
+ */
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -54,10 +62,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
- /* is VLAN TPID specified */
- if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID)
- lkups_cnt++;
-
/* is CVLAN specified? */
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
@@ -84,10 +88,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
lkups_cnt++;
- /* matching for tunneled packets in metadata */
- if (fltr->tunnel_type != TNL_LAST)
- lkups_cnt++;
-
return lkups_cnt;
}
@@ -176,10 +176,9 @@ static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
- struct ice_adv_lkup_elem *list)
+ struct ice_adv_lkup_elem *list, int i)
{
struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
- int i = 0;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
u32 tenant_id;
@@ -329,8 +328,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
/* always fill matching on tunneled packets in metadata */
- ice_rule_add_tunnel_metadata(&list[i]);
- i++;
+ ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
return i;
}
@@ -358,13 +356,16 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
u16 vlan_tpid = 0;
- int i = 0;
+ int i = 1; /* 0th lookup is metadata */
rule_info->vlan_type = vlan_tpid;
+ /* Always add direction metadata */
+ ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
- i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
headers = &tc_fltr->inner_headers;
inner = true;
@@ -431,8 +432,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
- ice_rule_add_vlan_metadata(&list[i]);
- i++;
+ ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
}
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
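[Editor's note] Reserving slot 0 means every metadata helper now ORs its bits into the same lookup element instead of consuming a fresh one. A condensed sketch of the resulting fill pattern for a filter matching direction, VLAN and tunnel:

	/* sketch: all metadata accumulates in list[0] via |= */
	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
	ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
	ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
	/* non-metadata lookups start at index 1 */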
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index e82f38c2a940..5e353b0cbe6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1040,10 +1040,10 @@ enum ice_sw_fwd_act_type {
};
struct ice_aq_get_set_rss_lut_params {
- u16 vsi_handle; /* software VSI handle */
- u16 lut_size; /* size of the LUT buffer */
- u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
u8 *lut; /* input RSS LUT for set and output RSS LUT for get */
+ enum ice_lut_size lut_size; /* size of the LUT buffer */
+ enum ice_lut_type lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
+ u16 vsi_handle; /* software VSI handle */
u8 global_lut_id; /* only valid when lut_type is global */
};
@@ -1145,9 +1145,6 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_WORDS_IN_1KB 512
-/* Hash redirection LUT for VSI - maximum array size */
-#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
-
/* AQ API version for LLDP_FILTER_CONTROL */
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 625da88e7965..85d996531502 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -500,7 +500,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = vsi->num_txq;
vfres->max_vectors = vf->pf->vfs.num_msix_per;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
- vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
@@ -962,7 +962,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -978,7 +978,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 63d4e32df029..b6f0376e42f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -945,8 +945,6 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
@@ -997,10 +995,6 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
-#ifdef CONFIG_IXGBE_DCB
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
-#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 4b531e8ae38a..34761e691d52 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,7 +8,6 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8eb9839a3ca6..dd03b017dfc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10042,9 +10042,6 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0f8f70b91485..c1deb04ba7e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -193,7 +193,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
+ min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
}
/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 560800246573..0fef853eab62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -77,6 +77,10 @@ mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_es
return NULL;
priv = netdev_priv(dev);
+
+ if (!priv->mdev->priv.eswitch->br_offloads)
+ return NULL;
+
rpriv = priv->ppriv;
*vport_num = rpriv->rep->vport;
*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 698647cc8c0f..5620d9f97518 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -127,7 +127,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
struct net_device *netdev = priv->netdev;
struct mlx5e_trap *t;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c8ec6467d4d1..61a5ddd6e585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1989,7 +1989,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
+ err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -2445,14 +2445,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
int err;
- err = mlx5_vector2irqn(priv->mdev, ix, &irq);
+ err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
if (err)
return err;
@@ -2856,13 +2856,13 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
int num_comp_vectors, ix, irq;
- num_comp_vectors = mlx5_comp_vectors_count(mdev);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
for (ix = 0; ix < params->num_channels; ix++) {
cpumask_clear(priv->scratchpad.cpumask);
for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
}
@@ -4896,9 +4896,6 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 3db4866d7880..ea0405e0a43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -47,7 +47,7 @@ enum {
static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
struct mlx5_eq_table {
- struct list_head comp_eqs_list;
+ struct xarray comp_eqs;
struct mlx5_eq_async pages_eq;
struct mlx5_eq_async cmd_eq;
struct mlx5_eq_async async_eq;
@@ -58,11 +58,14 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
- int num_comp_eqs;
+ struct mutex comp_lock; /* sync comp eqs creations */
+ int curr_comp_eqs;
+ int max_comp_eqs;
struct mlx5_irq_table *irq_table;
- struct mlx5_irq **comp_irqs;
+ struct xarray comp_irqs;
struct mlx5_irq *ctrl_irq;
struct cpu_rmap *rmap;
+ struct cpumask used_cpus;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -452,13 +455,22 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
+ cpumask_clear(&eq_table->used_cpus);
+ xa_init(&eq_table->comp_eqs);
+ xa_init(&eq_table->comp_irqs);
+ mutex_init(&eq_table->comp_lock);
+ eq_table->curr_comp_eqs = 0;
return 0;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+
mlx5_eq_debugfs_cleanup(dev);
- kvfree(dev->priv.eq_table);
+ xa_destroy(&table->comp_irqs);
+ xa_destroy(&table->comp_eqs);
+ kvfree(table);
}
/* Async EQs */
@@ -803,88 +815,112 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
-static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
+static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
+
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_release_vector(irq);
}
-static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
+static int mlx5_cpumask_default_spread(int numa_node, int index)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
const struct cpumask *prev = cpu_none_mask;
const struct cpumask *mask;
- int ncomp_eqs;
- u16 *cpus;
- int ret;
+ int found_cpu = 0;
+ int i = 0;
int cpu;
- int i;
-
- ncomp_eqs = table->num_comp_eqs;
- cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
- if (!cpus)
- return -ENOMEM;
- i = 0;
rcu_read_lock();
- for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_numa_hop_mask(mask, numa_node) {
for_each_cpu_andnot(cpu, mask, prev) {
- cpus[i] = cpu;
- if (++i == ncomp_eqs)
+ if (i++ == index) {
+ found_cpu = cpu;
goto spread_done;
+ }
}
prev = mask;
}
+
spread_done:
rcu_read_unlock();
- ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
- kfree(cpus);
- return ret;
+ return found_cpu;
}
-static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
}
-static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
+static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs = table->num_comp_eqs;
+ struct cpu_rmap *rmap;
+ struct mlx5_irq *irq;
+ int cpu;
+
+ rmap = mlx5_eq_table_get_pci_rmap(dev);
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
- return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
-static void comp_irqs_release(struct mlx5_core_dev *dev)
+static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
- mlx5_core_is_sf(dev) ? comp_irqs_release_sf(dev) :
- comp_irqs_release_pci(dev);
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- kfree(table->comp_irqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_affinity_irq_release(dev, irq);
}
-static int comp_irqs_request(struct mlx5_core_dev *dev)
+static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs;
- int ret;
+ struct mlx5_irq *irq;
- ncomp_eqs = table->num_comp_eqs;
- table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
- if (!table->comp_irqs)
- return -ENOMEM;
+ irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
+ if (IS_ERR(irq)) {
+ /* If the SF IRQ pool does not exist, fall back to the PF IRQs */
+ if (PTR_ERR(irq) == -ENOENT)
+ return comp_irq_request_pci(dev, vecidx);
+
+ return PTR_ERR(irq);
+ }
+
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
+}
- ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) :
- comp_irqs_request_pci(dev);
- if (ret < 0)
- kfree(table->comp_irqs);
+static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
+ comp_irq_release_pci(dev, vecidx);
+}
- return ret;
+static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
+ comp_irq_request_pci(dev, vecidx);
}
#ifdef CONFIG_RFS_ACCEL
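[Editor's note] Completion IRQs, and the EQs built on them further down, are now created lazily per vector and cached in xarrays instead of being allocated in one bulk pass at load time. From a consumer's perspective the first request for a vector triggers the allocation; a hedged sketch:

	/* hedged consumer-side sketch: the EQ/IRQ pair for 'vector' is
	 * created on first use and cached in the eq_table afterwards
	 */
	int eqn, err;

	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	if (err)
		return err;
	/* eqn can now be used when creating a CQ on that vector */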
@@ -901,7 +937,7 @@ static int alloc_rmap(struct mlx5_core_dev *mdev)
if (mlx5_core_is_sf(mdev))
return 0;
- eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
if (!eq_table->rmap)
return -ENOMEM;
return 0;
@@ -921,22 +957,19 @@ static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
-static void destroy_comp_eqs(struct mlx5_core_dev *dev)
+static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
-
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- list_del(&eq->list);
- mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
- if (destroy_unmap_eq(dev, &eq->core))
- mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
- eq->core.eqn);
- tasklet_disable(&eq->tasklet_ctx.task);
- kfree(eq);
- }
- comp_irqs_release(dev);
- free_rmap(dev);
+
+ xa_erase(&table->comp_eqs, vecidx);
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ if (destroy_unmap_eq(dev, &eq->core))
+ mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
+ eq->core.eqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
+ kfree(eq);
+ comp_irq_release(dev, vecidx);
+ table->curr_comp_eqs--;
}
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -954,129 +987,149 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
return MLX5_COMP_EQ_SIZE;
}
-static int create_comp_eqs(struct mlx5_core_dev *dev)
+/* Must be called with EQ table comp_lock held */
+static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_param param = {};
struct mlx5_eq_comp *eq;
- int ncomp_eqs;
+ struct mlx5_irq *irq;
int nent;
int err;
- int i;
- err = alloc_rmap(dev);
+ lockdep_assert_held(&table->comp_lock);
+ if (table->curr_comp_eqs == table->max_comp_eqs) {
+ mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
+ table->max_comp_eqs);
+ return -ENOMEM;
+ }
+
+ err = comp_irq_request(dev, vecidx);
if (err)
return err;
- ncomp_eqs = comp_irqs_request(dev);
- if (ncomp_eqs < 0) {
- err = ncomp_eqs;
- goto err_irqs_req;
- }
-
- INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
- for (i = 0; i < ncomp_eqs; i++) {
- struct mlx5_eq_param param = {};
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean_irq;
+ }
- eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
-
- eq->irq_nb.notifier_call = mlx5_eq_comp_int;
- param = (struct mlx5_eq_param) {
- .irq = table->comp_irqs[i],
- .nent = nent,
- };
-
- err = create_map_eq(dev, &eq->core, &param);
- if (err)
- goto clean_eq;
- err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
- if (err) {
- destroy_unmap_eq(dev, &eq->core);
- goto clean_eq;
- }
+ irq = xa_load(&table->comp_irqs, vecidx);
+ eq->irq_nb.notifier_call = mlx5_eq_comp_int;
+ param = (struct mlx5_eq_param) {
+ .irq = irq,
+ .nent = nent,
+ };
- mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
- /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
- list_add_tail(&eq->list, &table->comp_eqs_list);
+ err = create_map_eq(dev, &eq->core, &param);
+ if (err)
+ goto clean_eq;
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ destroy_unmap_eq(dev, &eq->core);
+ goto clean_eq;
}
- table->num_comp_eqs = ncomp_eqs;
- return 0;
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
+ err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
+ if (err)
+ goto disable_eq;
+
+ table->curr_comp_eqs++;
+ return eq->core.eqn;
+disable_eq:
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
kfree(eq);
-clean:
- destroy_comp_eqs(dev);
-err_irqs_req:
- free_rmap(dev);
+clean_irq:
+ comp_irq_release(dev, vecidx);
return err;
}
-static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int err = -ENOENT;
- int i = 0;
+ int ret = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector) {
- if (irqn)
- *irqn = eq->core.irqn;
- if (eqn)
- *eqn = eq->core.eqn;
- err = 0;
- break;
- }
+ mutex_lock(&table->comp_lock);
+ eq = xa_load(&table->comp_eqs, vecidx);
+ if (eq) {
+ *eqn = eq->core.eqn;
+ goto out;
}
- return err;
-}
+ ret = create_comp_eq(dev, vecidx);
+ if (ret < 0) {
+ mutex_unlock(&table->comp_lock);
+ return ret;
+ }
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
-{
- return vector2eqnirqn(dev, vector, eqn, NULL);
+ *eqn = ret;
+out:
+ mutex_unlock(&table->comp_lock);
+ return 0;
}
-EXPORT_SYMBOL(mlx5_vector2eqn);
+EXPORT_SYMBOL(mlx5_comp_eqn_get);
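With this change completion EQs are allocated lazily: the first consumer asking for a vector's EQN triggers creation of the IRQ and the EQ under comp_lock, and later calls just look the EQ up in the xarray. A minimal sketch of a caller (create_my_cq is hypothetical, modeled on the aso/fpga users updated below):

static int create_my_cq(struct mlx5_core_dev *mdev, int vector)
{
	int eqn, err;

	/* Creates the completion EQ (and its IRQ) for this vector on
	 * first use; subsequent calls hit the xarray lookup fast path.
	 */
	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	if (err)
		return err;

	/* ... pass eqn into the CQ creation command ... */
	return 0;
}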
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
- return vector2eqnirqn(dev, vector, NULL, irqn);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ int eqn;
+ int err;
+
+ /* Allocate the EQ if not allocated yet */
+ err = mlx5_comp_eqn_get(dev, vector, &eqn);
+ if (err)
+ return err;
+
+ eq = xa_load(&table->comp_eqs, vector);
+ *irqn = eq->core.irqn;
+ return 0;
}
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->num_comp_eqs;
+ return dev->priv.eq_table->max_comp_eqs;
}
-EXPORT_SYMBOL(mlx5_comp_vectors_count);
+EXPORT_SYMBOL(mlx5_comp_vectors_max);
-struct cpumask *
+static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int i = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector)
- return mlx5_irq_get_affinity_mask(eq->core.irq);
- }
+ eq = xa_load(&table->comp_eqs, vector);
+ if (eq)
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
- WARN_ON_ONCE(1);
return NULL;
}
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+ struct cpumask *mask;
+ int cpu;
+
+ mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+ if (mask)
+ cpu = cpumask_first(mask);
+ else
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+ return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
@@ -1089,11 +1142,11 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
+ unsigned long index;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ xa_for_each(&table->comp_eqs, index, eq)
if (eq->core.eqn == eqn)
return eq;
- }
return ERR_PTR(-ENOENT);
}
@@ -1101,11 +1154,7 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
mlx5_irq_table_free_irqs(dev);
- mutex_unlock(&table->lock);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -1148,22 +1197,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- eq_table->num_comp_eqs = get_num_eqs(dev);
+ eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n");
goto err_async_eqs;
}
- err = create_comp_eqs(dev);
+ err = alloc_rmap(dev);
if (err) {
- mlx5_core_err(dev, "Failed to create completion EQs\n");
- goto err_comp_eqs;
+ mlx5_core_err(dev, "Failed to allocate rmap\n");
+ goto err_rmap;
}
return 0;
-err_comp_eqs:
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -1171,7 +1220,14 @@ err_async_eqs:
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
- destroy_comp_eqs(dev);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ unsigned long index;
+
+ xa_for_each(&table->comp_eqs, index, eq)
+ destroy_comp_eq(dev, eq, index);
+
+ free_rmap(dev);
destroy_async_eqs(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 7c79476cc5f9..1887a24ee414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -740,7 +740,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
u64 *rate, struct netlink_ext_ack *extack)
{
- u32 link_speed_max, reminder;
+ u32 link_speed_max, remainder;
u64 value;
int err;
@@ -750,8 +750,8 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return err;
}
- value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder);
- if (reminder) {
+ value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
+ if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
name, *rate);
NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");
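The devlink rate API hands the driver a rate in bytes per second, and the conversion above accepts only values that are a whole number of Mbit/s. A standalone model of the check, assuming MLX5_LINKSPEED_UNIT is 125000 (1 Mbit/s expressed in Bps):

#include <stdint.h>
#include <stdio.h>

#define MLX5_LINKSPEED_UNIT 125000	/* assumed: 1 Mbit/s in bytes/s */

int main(void)
{
	uint64_t rates[] = { 125000000, 125000001 }; /* 1000 Mbps; +1 Bps */

	for (int i = 0; i < 2; i++) {
		uint64_t mbps = rates[i] / MLX5_LINKSPEED_UNIT;
		uint32_t remainder = rates[i] % MLX5_LINKSPEED_UNIT;

		if (remainder)	/* mirrors the div_u64_rem() rejection */
			printf("%llu Bps rejected: not whole Mbps\n",
			       (unsigned long long)rates[i]);
		else
			printf("%llu Bps -> %llu Mbps\n",
			       (unsigned long long)rates[i],
			       (unsigned long long)mbps);
	}
	return 0;
}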
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 723dff87e6d5..e391535e1ab1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -375,7 +375,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- bool ignore_flow_lvl,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -385,8 +384,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- if (ignore_flow_lvl)
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
@@ -569,7 +567,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 12abe991583a..c4de6bf8d1b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -445,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
+ err = mlx5_comp_eqn_get(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 39c03dcbd196..e5c1012921d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -57,7 +57,7 @@ static const char * const mlx5_fpga_qp_error_strings[] = {
};
static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev)
@@ -252,7 +252,7 @@ out:
int mlx5_fpga_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
if (!MLX5_CAP_GEN(mdev, fpga)) {
mlx5_core_dbg(mdev, "FPGA capability not present\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index fa467335526e..047d5fed5f89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -156,67 +156,57 @@ unlock:
return least_loaded_irq;
}
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs)
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- int i;
-
- for (i = 0; i < num_irqs; i++) {
- int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));
+ int cpu;
- synchronize_irq(pci_irq_vector(pool->dev->pdev,
- mlx5_irq_get_index(irqs[i])));
- if (mlx5_irq_put(irqs[i]))
- if (pool->irqs_per_cpu)
- cpu_put(pool, cpu);
- }
+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+ synchronize_irq(pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(irq)));
+ if (mlx5_irq_put(irq))
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
}
/**
- * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @used_cpus: cpumask of CPUs already used by the device.
+ * @vecidx: vector index to request an IRQ for.
*
* Each IRQ is bound to at most 1 CPU.
- * This function is requesting IRQs according to the default assignment.
+ * This function requests an IRQ according to the default assignment.
* The default assignment policy is:
- * - in each iteration, request the least loaded IRQ which is not bound to any
+ * - request the least loaded IRQ which is not bound to any
* CPU of the previous IRQs requested.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * On success, this function updates the used_cpus mask and returns an IRQ pointer.
+ * In case of an error, an appropriate error pointer is returned.
*/
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
- int i = 0;
+
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return ERR_PTR(-ENOENT);
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
- for (i = 0; i < nirqs; i++) {
- if (mlx5_irq_pool_is_sf_pool(pool))
- irq = mlx5_irq_affinity_request(pool, &af_desc);
- else
- /* In case SF pool doesn't exists, fallback to the PF IRQs.
- * The PF IRQs are already allocated and binded to CPU
- * at this point. Hence, only an index is needed.
- */
- irq = mlx5_irq_request(dev, i, NULL, NULL);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
- }
- if (!i)
- return PTR_ERR(irq);
- return i;
+ cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
+
+ if (IS_ERR(irq))
+ return irq;
+
+ cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+
+ return irq;
}
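The per-vector contract replaces the old batch loop: the caller owns the used_cpus mask and threads it through successive requests, so each new SF IRQ lands on a CPU no earlier vector already took. A hypothetical two-vector caller, mirroring what comp_irq_request_sf does one vector at a time (in the driver the mask lives in the EQ table rather than being passed around like this):

static int request_two_sf_irqs(struct mlx5_core_dev *dev,
			       struct cpumask *used_cpus)
{
	struct mlx5_irq *irq0, *irq1;

	irq0 = mlx5_irq_affinity_irq_request_auto(dev, used_cpus, 0);
	if (IS_ERR(irq0))
		return PTR_ERR(irq0);
	/* used_cpus now contains irq0's CPU, so vector 1 avoids it */
	irq1 = mlx5_irq_affinity_irq_request_auto(dev, used_cpus, 1);
	if (IS_ERR(irq1))
		return PTR_ERR(irq1);
	return 0;
}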
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 5a80fb7dbbca..40c7be124041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -81,7 +81,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
int inlen, eqn;
int err;
- err = mlx5_vector2eqn(mdev, 0, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index d3d628b862f3..69a75459775d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -104,6 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
index 4047629a876b..30564d9b00e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -40,7 +40,7 @@ struct mlx5_hv_vhca_agent {
struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
{
- struct mlx5_hv_vhca *hv_vhca = NULL;
+ struct mlx5_hv_vhca *hv_vhca;
hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
if (!hv_vhca)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index aa403a5ea34e..1088114e905d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -29,9 +29,9 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap);
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap);
+void mlx5_irq_release_vector(struct mlx5_irq *irq);
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
@@ -39,17 +39,17 @@ int mlx5_irq_get_index(struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs);
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx);
struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
struct irq_affinity_desc *af_desc);
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs);
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
-static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+static inline
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct mlx5_irq *
@@ -58,7 +58,9 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev,
- struct mlx5_irq **irqs, int num_irqs) {}
+static inline
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
+{
+}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index cba2a4afb5fd..33a133c9918c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -432,19 +432,10 @@ static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
return pool ? pool : irq_table->pcif_pool;
}
-/**
- * mlx5_irqs_release - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
- */
-static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+static void _mlx5_irq_release(struct mlx5_irq *irq)
{
- int i;
-
- for (i = 0; i < nirqs; i++) {
- synchronize_irq(irqs[i]->map.virq);
- mlx5_irq_put(irqs[i]);
- }
+ synchronize_irq(irq->map.virq);
+ mlx5_irq_put(irq);
}
/**
@@ -453,7 +444,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
*/
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- mlx5_irqs_release(&ctrl_irq, 1);
+ _mlx5_irq_release(ctrl_irq);
}
/**
@@ -569,53 +560,42 @@ void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
EXPORT_SYMBOL(mlx5_msix_free);
/**
- * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
+ * mlx5_irq_release_vector - release one IRQ back to the system.
+ * @irq: the irq to release.
*/
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+void mlx5_irq_release_vector(struct mlx5_irq *irq)
{
- mlx5_irqs_release(irqs, nirqs);
+ _mlx5_irq_release(irq);
}
/**
- * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @cpus: CPUs array for binding the IRQs
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_request_vector - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @cpu: CPU to bind the IRQ to.
+ * @vecidx: vector index to request an IRQ for.
* @rmap: pointer to reverse map pointer for completion interrupts
*
* Each IRQ is bound to at most 1 CPU.
- * This function is requests nirqs IRQs, starting from @vecidx.
+ * This function requests one IRQ for the given @vecidx.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * This function returns a pointer to the irq on success, or an error pointer
+ * in case of an error.
*/
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap)
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- struct mlx5_irq *irq;
int offset = 1;
- int i;
if (!pool->xa_num_irqs.max)
offset = 0;
af_desc.is_managed = false;
- for (i = 0; i < nirqs; i++) {
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpus[i], &af_desc.mask);
- irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- }
-
- return i ? i : PTR_ERR(irq);
+ cpumask_clear(&af_desc.mask);
+ cpumask_set_cpu(cpu, &af_desc.mask);
+ return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
}
static struct mlx5_irq_pool *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 4a5ae86e2b62..6fa06ba2d346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -1096,8 +1096,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+ err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index a453b9cd9033..bc94e75a7aeb 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -175,9 +175,6 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
-irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
-void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
-
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5376d4af5f91..efacb057d1d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -3549,7 +3549,6 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = switchdev_work->dev;
- u8 all_zeros_mac[ETH_ALEN] = { 0 };
enum mlxsw_sp_l3proto proto;
union mlxsw_sp_l3addr addr;
struct net_device *br_dev;
@@ -3571,7 +3570,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
&proto, &addr);
- if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
if (err) {
mlxsw_sp_fid_put(fid);
@@ -3623,7 +3622,6 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = switchdev_work->dev;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- u8 all_zeros_mac[ETH_ALEN] = { 0 };
enum mlxsw_sp_l3proto proto;
union mlxsw_sp_l3addr addr;
struct mlxsw_sp_fid *fid;
@@ -3644,7 +3642,7 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
&proto, &addr);
- if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
mlxsw_sp_fid_put(fid);
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f18c791cf698..de0a5d5ded30 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2108,9 +2108,6 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
new_ctrl = nn->dp.ctrl;
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 0ba7fb75d589..6083b1c8e4fb 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -20,6 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include "rswitch.h"
@@ -1243,7 +1244,6 @@ static void rswitch_adjust_link(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- /* Current hardware has a restriction not to change speed at runtime */
if (phydev->link != rdev->etha->link) {
phy_print_status(phydev);
if (phydev->link)
@@ -1252,13 +1252,23 @@ static void rswitch_adjust_link(struct net_device *ndev)
phy_power_off(rdev->serdes);
rdev->etha->link = phydev->link;
+
+ if (!rdev->priv->etha_no_runtime_change &&
+ phydev->speed != rdev->etha->speed) {
+ rdev->etha->speed = phydev->speed;
+
+ rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
+ phy_set_speed(rdev->serdes, rdev->etha->speed);
+ }
}
}
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
struct phy_device *phydev)
{
- /* Current hardware has a restriction not to change speed at runtime */
+ if (!rdev->priv->etha_no_runtime_change)
+ return;
+
switch (rdev->etha->speed) {
case SPEED_2500:
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
@@ -1347,7 +1357,8 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
if (err < 0)
return err;
- rdev->etha->operated = true;
+ if (rdev->priv->etha_no_runtime_change)
+ rdev->etha->operated = true;
}
err = rswitch_mii_register(rdev);
@@ -1653,6 +1664,8 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *
static const struct ethtool_ops rswitch_ethtool_ops = {
.get_ts_info = rswitch_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct of_device_id renesas_eth_sw_of_table[] = {
@@ -1853,8 +1866,14 @@ err_ts_queue_alloc:
return err;
}
+static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
+ { .soc_id = "r8a779f0", .revision = "ES1.0" },
+ { /* Sentinel */ }
+};
+
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rswitch_private *priv;
struct resource *res;
int ret;
@@ -1869,6 +1888,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ attr = soc_device_match(rswitch_soc_no_speed_change);
+ if (attr)
+ priv->etha_no_runtime_change = true;
+
priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
if (!priv->ptp_priv)
return -ENOMEM;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index bb9ed971a97c..54f397effbc6 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1011,6 +1011,7 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ bool etha_no_runtime_change;
bool gwca_halt;
};
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 16293b58e0a8..8f446b9bd5ee 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -11,7 +11,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
- tc_encap_actions.o
+ tc_encap_actions.o tc_conntrack.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 1f981dfe4bdc..89665fc9b8d0 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -26,6 +26,8 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
#define EFX_WORD_0_LBN 0
#define EFX_WORD_0_WIDTH 16
#define EFX_WORD_1_LBN 16
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 0cab508f2f9d..3b8780c76b6e 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -16,6 +16,7 @@
#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"
#include "tc_encap_actions.h"
+#include "tc_conntrack.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
{
@@ -227,6 +228,256 @@ void efx_mae_counters_grant_credits(struct work_struct *work)
rx_queue->granted_count += credits;
}
+static int efx_mae_table_get_desc(struct efx_nic *efx,
+ struct efx_tc_table_desc *desc,
+ u32 table_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(16));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_DESCRIPTOR_IN_LEN);
+ unsigned int offset = 0, i;
+ size_t outlen;
+ int rc;
+
+ memset(desc, 0, sizeof(*desc));
+
+ MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_TABLE_ID, table_id);
+more:
+ MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX, offset);
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DESCRIPTOR, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(1)) {
+ rc = -EIO;
+ goto fail;
+ }
+ if (!offset) { /* first iteration: get metadata */
+ desc->type = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_TYPE);
+ desc->key_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_KEY_WIDTH);
+ desc->resp_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_RESP_WIDTH);
+ desc->n_keys = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS);
+ desc->n_resps = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS);
+ desc->n_prios = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_PRIORITIES);
+ desc->flags = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_FLAGS);
+ rc = -EOPNOTSUPP;
+ if (desc->flags)
+ goto fail;
+ desc->scheme = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_SCHEME);
+ if (desc->scheme)
+ goto fail;
+ rc = -ENOMEM;
+ desc->keys = kcalloc(desc->n_keys,
+ sizeof(struct efx_tc_table_field_fmt),
+ GFP_KERNEL);
+ if (!desc->keys)
+ goto fail;
+ desc->resps = kcalloc(desc->n_resps,
+ sizeof(struct efx_tc_table_field_fmt),
+ GFP_KERNEL);
+ if (!desc->resps)
+ goto fail;
+ }
+ /* FW could have returned more than the 16 field_descrs we
+ * made room for in our outbuf
+ */
+ outlen = min(outlen, sizeof(outbuf));
+ for (i = 0; i + offset < desc->n_keys + desc->n_resps; i++) {
+ struct efx_tc_table_field_fmt *field;
+ MCDI_DECLARE_STRUCT_PTR(fdesc);
+
+ if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(i + 1)) {
+ offset += i;
+ goto more;
+ }
+ if (i + offset < desc->n_keys)
+ field = desc->keys + i + offset;
+ else
+ field = desc->resps + (i + offset - desc->n_keys);
+ fdesc = MCDI_ARRAY_STRUCT_PTR(outbuf,
+ TABLE_DESCRIPTOR_OUT_FIELDS, i);
+ field->field_id = MCDI_STRUCT_WORD(fdesc,
+ TABLE_FIELD_DESCR_FIELD_ID);
+ field->lbn = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_LBN);
+ field->width = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_WIDTH);
+ field->masking = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_MASK_TYPE);
+ field->scheme = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_SCHEME);
+ }
+ return 0;
+
+fail:
+ kfree(desc->keys);
+ kfree(desc->resps);
+ return rc;
+}
+
+static int efx_mae_table_hook_find(u16 n_fields,
+ struct efx_tc_table_field_fmt *fields,
+ u16 field_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].field_id == field_id)
+ return i;
+ }
+ return -EPROTO;
+}
+
+#define TABLE_FIND_KEY(_desc, _id) \
+ efx_mae_table_hook_find((_desc)->n_keys, (_desc)->keys, _id)
+#define TABLE_FIND_RESP(_desc, _id) \
+ efx_mae_table_hook_find((_desc)->n_resps, (_desc)->resps, _id)
+
+#define TABLE_HOOK_KEY(_meta, _name, _mcdi_name) ({ \
+ int _rc = TABLE_FIND_KEY(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
+ \
+ if (_rc > U8_MAX) \
+ _rc = -EOPNOTSUPP; \
+ if (_rc >= 0) { \
+ _meta->keys._name##_idx = _rc; \
+ _rc = 0; \
+ } \
+ _rc; \
+})
+#define TABLE_HOOK_RESP(_meta, _name, _mcdi_name) ({ \
+ int _rc = TABLE_FIND_RESP(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
+ \
+ if (_rc > U8_MAX) \
+ _rc = -EOPNOTSUPP; \
+ if (_rc >= 0) { \
+ _meta->resps._name##_idx = _rc; \
+ _rc = 0; \
+ } \
+ _rc; \
+})
+
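Each hook macro resolves one well-known field ID to its index in the freshly fetched descriptor and caches it for later row encoding. For reference, TABLE_HOOK_KEY(meta_ct, zone, DOMAIN) expands to roughly:

int _rc = TABLE_FIND_KEY(&meta_ct->desc, TABLE_FIELD_ID_DOMAIN);

if (_rc > U8_MAX)			/* cached index must fit in a u8 */
	_rc = -EOPNOTSUPP;
if (_rc >= 0) {
	meta_ct->keys.zone_idx = _rc;	/* remember where DOMAIN lives */
	_rc = 0;
}
/* _rc is the value of the statement expression */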
+static int efx_mae_table_hook_ct(struct efx_nic *efx,
+ struct efx_tc_table_ct *meta_ct)
+{
+ int rc;
+
+ rc = TABLE_HOOK_KEY(meta_ct, eth_proto, ETHER_TYPE);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, ip_proto, IP_PROTO);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, src_ip, SRC_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, dst_ip, DST_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, l4_sport, SRC_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, l4_dport, DST_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, dnat, NAT_DIR);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, nat_ip, NAT_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, l4_natport, NAT_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, mark, CT_MARK);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, counter_id, COUNTER_ID);
+ if (rc)
+ return rc;
+ meta_ct->hooked = true;
+ return 0;
+}
+
+static void efx_mae_table_free_desc(struct efx_tc_table_desc *desc)
+{
+ kfree(desc->keys);
+ kfree(desc->resps);
+ memset(desc, 0, sizeof(*desc));
+}
+
+static bool efx_mae_check_table_exists(struct efx_nic *efx, u32 tbl_req)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LEN(16));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN);
+ u32 tbl_id, tbl_total, tbl_cnt, pos = 0;
+ size_t outlen, msg_max;
+ bool ct_tbl = false;
+ int rc, idx;
+
+ msg_max = sizeof(outbuf);
+ efx->tc->meta_ct.hooked = false;
+more:
+ memset(outbuf, 0, sizeof(outbuf));
+ MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, pos);
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf), outbuf,
+ msg_max, &outlen);
+ if (rc)
+ return false;
+
+ if (outlen < MC_CMD_TABLE_LIST_OUT_LEN(1))
+ return false;
+
+ tbl_total = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
+ tbl_cnt = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(min(outlen, msg_max));
+
+ for (idx = 0; idx < tbl_cnt; idx++) {
+ tbl_id = MCDI_ARRAY_DWORD(outbuf, TABLE_LIST_OUT_TABLE_ID, idx);
+ if (tbl_id == tbl_req) {
+ ct_tbl = true;
+ break;
+ }
+ }
+
+ pos += tbl_cnt;
+ if (!ct_tbl && pos < tbl_total)
+ goto more;
+
+ return ct_tbl;
+}
+
+int efx_mae_get_tables(struct efx_nic *efx)
+{
+ int rc;
+
+ efx->tc->meta_ct.hooked = false;
+ if (efx_mae_check_table_exists(efx, TABLE_ID_CONNTRACK_TABLE)) {
+ rc = efx_mae_table_get_desc(efx, &efx->tc->meta_ct.desc,
+ TABLE_ID_CONNTRACK_TABLE);
+ if (rc) {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack desc rc %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = efx_mae_table_hook_ct(efx, &efx->tc->meta_ct);
+ if (rc) {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack hook rc %d\n",
+ rc);
+ return 0;
+ }
+ } else {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack table\n");
+ }
+ return 0;
+}
+
+void efx_mae_free_tables(struct efx_nic *efx)
+{
+ efx_mae_table_free_desc(&efx->tc->meta_ct.desc);
+ efx->tc->meta_ct.hooked = false;
+}
+
static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
@@ -444,8 +695,13 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
CHECK(L4_SPORT, l4_sport) ||
CHECK(L4_DPORT, l4_dport) ||
CHECK(TCP_FLAGS, tcp_flags) ||
+ CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst) ||
CHECK_BIT(IS_IP_FRAG, ip_frag) ||
CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag) ||
+ CHECK_BIT(DO_CT, ct_state_trk) ||
+ CHECK_BIT(CT_HIT, ct_state_est) ||
+ CHECK(CT_MARK, ct_mark) ||
+ CHECK(CT_DOMAIN, ct_zone) ||
CHECK(RECIRC_ID, recirc_id))
return rc;
/* Matches on outer fields are done in a separate hardware table,
@@ -471,6 +727,90 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
}
return 0;
}
+
+/* Checks for match fields not supported in LHS Outer Rules */
+#define UNSUPPORTED(_field) ({ \
+ enum mask_type typ = classify_mask((const u8 *)&mask->_field, \
+ sizeof(mask->_field)); \
+ \
+ if (typ != MASK_ZEROES) { \
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
+ rc = -EOPNOTSUPP; \
+ } \
+ rc; \
+})
+#define UNSUPPORTED_BIT(_field) ({ \
+ if (mask->_field) { \
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
+ rc = -EOPNOTSUPP; \
+ } \
+ rc; \
+})
+
+/* LHS rules are (normally) inserted in the Outer Rule table, which means
+ * they use ENC_ fields in hardware to match regular (not enc_) fields from
+ * &struct efx_tc_match_fields.
+ */
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_fields = efx->tc->caps->outer_rule_fields;
+ __be32 ingress_port = cpu_to_be32(mask->ingress_port);
+ enum mask_type ingress_port_mask_type;
+ int rc;
+
+ /* Check for _PREFIX assumes big-endian, so we need to convert */
+ ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+ sizeof(ingress_port));
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+ ingress_port_mask_type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
+ mask_type_name(ingress_port_mask_type),
+ "ingress_port");
+ return rc;
+ }
+ if (CHECK(ENC_ETHER_TYPE, eth_proto) ||
+ CHECK(ENC_VLAN0_TCI, vlan_tci[0]) ||
+ CHECK(ENC_VLAN0_PROTO, vlan_proto[0]) ||
+ CHECK(ENC_VLAN1_TCI, vlan_tci[1]) ||
+ CHECK(ENC_VLAN1_PROTO, vlan_proto[1]) ||
+ CHECK(ENC_ETH_SADDR, eth_saddr) ||
+ CHECK(ENC_ETH_DADDR, eth_daddr) ||
+ CHECK(ENC_IP_PROTO, ip_proto) ||
+ CHECK(ENC_IP_TOS, ip_tos) ||
+ CHECK(ENC_IP_TTL, ip_ttl) ||
+ CHECK_BIT(ENC_IP_FRAG, ip_frag) ||
+ UNSUPPORTED_BIT(ip_firstfrag) ||
+ CHECK(ENC_SRC_IP4, src_ip) ||
+ CHECK(ENC_DST_IP4, dst_ip) ||
+#ifdef CONFIG_IPV6
+ CHECK(ENC_SRC_IP6, src_ip6) ||
+ CHECK(ENC_DST_IP6, dst_ip6) ||
+#endif
+ CHECK(ENC_L4_SPORT, l4_sport) ||
+ CHECK(ENC_L4_DPORT, l4_dport) ||
+ UNSUPPORTED(tcp_flags) ||
+ CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst))
+ return rc;
+ if (efx_tc_match_is_encap(mask)) {
+ /* can't happen; disallowed for local rules, translated
+ * for foreign rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected encap match in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ if (UNSUPPORTED(enc_keyid) ||
+ /* Can't filter on conntrack in LHS rules */
+ UNSUPPORTED_BIT(ct_state_trk) ||
+ UNSUPPORTED_BIT(ct_state_est) ||
+ UNSUPPORTED(ct_mark) ||
+ UNSUPPORTED(recirc_id))
+ return rc;
+ return 0;
+}
+#undef UNSUPPORTED
#undef CHECK_BIT
#undef CHECK
@@ -1153,6 +1493,465 @@ int efx_mae_unregister_encap_match(struct efx_nic *efx,
return 0;
}
+static int efx_mae_populate_lhs_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE,
+ match->value.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK,
+ match->mask.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE,
+ match->value.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK,
+ match->mask.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE,
+ match->value.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK,
+ match->mask.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE,
+ match->value.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK,
+ match->mask.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE,
+ match->value.vlan_proto[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK,
+ match->mask.vlan_proto[1]);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE),
+ match->value.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK),
+ match->mask.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE),
+ match->value.eth_daddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK),
+ match->mask.eth_daddr, ETH_ALEN);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO,
+ match->value.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK,
+ match->mask.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS,
+ match->value.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK,
+ match->mask.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL,
+ match->value.ip_ttl);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK,
+ match->mask.ip_ttl);
+ MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
+ MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS,
+ MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG,
+ match->value.ip_frag);
+ MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
+ MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK,
+ MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK,
+ match->mask.ip_frag);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE,
+ match->value.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK,
+ match->mask.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE,
+ match->value.dst_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK,
+ match->mask.dst_ip);
+#ifdef CONFIG_IPV6
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE),
+ &match->value.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK),
+ &match->mask.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE),
+ &match->value.dst_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK),
+ &match->mask.dst_ip6, sizeof(struct in6_addr));
+#endif
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE,
+ match->value.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK,
+ match->mask.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE,
+ match->value.l4_dport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK,
+ match->mask.l4_dport);
+ /* No enc-keys in LHS rules. Caps check should have caught this; any
+ * enc-keys from an fLHS should have been translated to regular keys
+ * and any EM should be a pseudo (we're an OR so can't have a direct
+ * EM with another OR).
+ */
+ if (WARN_ON_ONCE(match->encap && !match->encap->type))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_src_ip))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_dst_ip))
+ return -EOPNOTSUPP;
+#ifdef CONFIG_IPV6
+ if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6)))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6)))
+ return -EOPNOTSUPP;
+#endif
+ if (WARN_ON_ONCE(match->mask.enc_ip_tos))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_ip_ttl))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_sport))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_dport))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_keyid))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
+ struct efx_tc_lhs_rule *rule, u32 prio)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ const struct efx_tc_lhs_action *act;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
+ /* match */
+ match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA);
+ rc = efx_mae_populate_lhs_match_criteria(match_crit, &rule->match);
+ if (rc)
+ return rc;
+
+ /* action */
+ act = &rule->lhs_act;
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
+ MAE_MCDI_ENCAP_TYPE_NONE);
+ /* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the
+ * SW path needs to process the packet to update the conntrack tables
+ * on connection establishment (SYN) or termination (FIN, RST).
+ */
+ MCDI_POPULATE_DWORD_6(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL,
+ MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone,
+ MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT, 1,
+ MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN,
+ act->zone ? act->zone->zone : 0,
+ MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE,
+ MAE_CT_VNI_MODE_ZERO,
+ MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count,
+ MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
+ act->rid ? act->rid->fw_id : 0);
+ if (act->count)
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID,
+ act->count->cnt->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ rule->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID);
+ return 0;
+}
+
+int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
+ u32 prio)
+{
+ return efx_mae_insert_lhs_outer_rule(efx, rule, prio);
+}
+
+static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx,
+ struct efx_tc_lhs_rule *rule)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, rule->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what outer rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != rule->fw_id))
+ return -EIO;
+ /* We're probably about to free @rule, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ rule->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL;
+ return 0;
+}
+
+int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule)
+{
+ return efx_mae_remove_lhs_outer_rule(efx, rule);
+}
+
+/* Populating is done by taking each byte of @value in turn and storing
+ * it in the appropriate bits of @row. @value must be big-endian; we
+ * convert it to little-endianness as we go.
+ */
+static int efx_mae_table_populate(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits,
+ void *value, size_t value_size)
+{
+ unsigned int i;
+
+ /* For now only scheme 0 is supported for any field, so we check here
+ * (rather than, say, in calling code, which knows the semantics and
+ * could in principle encode for other schemes).
+ */
+ if (field.scheme)
+ return -EOPNOTSUPP;
+ if (DIV_ROUND_UP(field.width, 8) != value_size)
+ return -EINVAL;
+ if (field.lbn + field.width > row_bits)
+ return -EINVAL;
+ for (i = 0; i < value_size; i++) {
+ unsigned int bn = field.lbn + i * 8;
+ unsigned int wn = bn / 32;
+ u64 v;
+
+ v = ((u8 *)value)[value_size - i - 1];
+ v <<= (bn % 32);
+ row[wn] |= cpu_to_le32(v & 0xffffffff);
+ if (wn * 32 < row_bits)
+ row[wn + 1] |= cpu_to_le32(v >> 32);
+ }
+ return 0;
+}
+
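A worked example of the packing loop as standalone C with fixed-width types (populate() here models efx_mae_table_populate() minus the scheme/width checks): a big-endian 16-bit value placed at bit 28 straddles two little-endian words.

#include <stdint.h>
#include <stdio.h>

static void populate(uint32_t *row, unsigned int lbn,
		     const uint8_t *value, size_t value_size)
{
	for (size_t i = 0; i < value_size; i++) {
		unsigned int bn = lbn + i * 8;
		unsigned int wn = bn / 32;
		uint64_t v = (uint64_t)value[value_size - i - 1] << (bn % 32);

		row[wn] |= (uint32_t)v;
		if (v >> 32)		/* byte spilled into the next word */
			row[wn + 1] |= (uint32_t)(v >> 32);
	}
}

int main(void)
{
	uint32_t row[2] = { 0, 0 };
	uint8_t value[2] = { 0x12, 0x34 };	/* big-endian 0x1234 */

	populate(row, 28, value, sizeof(value));
	printf("row[0]=%08x row[1]=%08x\n", row[0], row[1]);
	/* prints: row[0]=40000000 row[1]=00000123 */
	return 0;
}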
+static int efx_mae_table_populate_bool(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, bool value)
+{
+ u8 v = value ? 1 : 0;
+
+ if (field.width != 1)
+ return -EINVAL;
+ return efx_mae_table_populate(field, row, row_bits, &v, 1);
+}
+
+static int efx_mae_table_populate_ipv4(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, __be32 value)
+{
+ /* IPv4 is placed in the first 4 bytes of an IPv6-sized field */
+ struct in6_addr v = {};
+
+ if (field.width != 128)
+ return -EINVAL;
+ v.s6_addr32[0] = value;
+ return efx_mae_table_populate(field, row, row_bits, &v, sizeof(v));
+}
+
+static int efx_mae_table_populate_u24(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, u32 value)
+{
+ __be32 v = cpu_to_be32(value);
+
+ /* Copy just the low 3 bytes: point past the first (most
+ * significant) byte of the big-endian 4-byte value and shrink
+ * value_size to match.
+ */
+ return efx_mae_table_populate(field, row, row_bits, ((void *)&v) + 1,
+ sizeof(v) - 1);
+}
+
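The same trick in miniature: converting to big-endian puts the unused most significant byte first, so advancing the pointer by one yields exactly the 3 payload bytes.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw_id = 0x00abcdef;		/* 24-bit counter ID */
	uint32_t be = htonl(fw_id);		/* bytes in memory: 00 ab cd ef */
	const uint8_t *p = (const uint8_t *)&be + 1;	/* skip the MSB */

	printf("%02x %02x %02x\n", p[0], p[1], p[2]);	/* ab cd ef */
	return 0;
}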
+#define _TABLE_POPULATE(dst, dw, _field, _value) ({ \
+ typeof(_value) _v = _value; \
+ \
+ (_field.width == sizeof(_value) * 8) ? \
+ efx_mae_table_populate(_field, dst, dw, &_v, \
+ sizeof(_v)) : -EINVAL; \
+})
+#define TABLE_POPULATE_KEY_IPV4(dst, _table, _field, _value) \
+ efx_mae_table_populate_ipv4(efx->tc->meta_##_table.desc.keys \
+ [efx->tc->meta_##_table.keys._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.key_width,\
+ _value)
+#define TABLE_POPULATE_KEY(dst, _table, _field, _value) \
+ _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.key_width, \
+ efx->tc->meta_##_table.desc.keys \
+ [efx->tc->meta_##_table.keys._field##_idx], \
+ _value)
+
+#define TABLE_POPULATE_RESP_BOOL(dst, _table, _field, _value) \
+ efx_mae_table_populate_bool(efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.resp_width,\
+ _value)
+#define TABLE_POPULATE_RESP(dst, _table, _field, _value) \
+ _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.resp_width, \
+ efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx], \
+ _value)
+
+#define TABLE_POPULATE_RESP_U24(dst, _table, _field, _value) \
+ efx_mae_table_populate_u24(efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.resp_width,\
+ _value)
+
+static int efx_mae_populate_ct_key(struct efx_nic *efx, __le32 *key, size_t kw,
+ struct efx_tc_ct_entry *conn)
+{
+ bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
+ int rc;
+
+ rc = TABLE_POPULATE_KEY(key, ct, eth_proto, conn->eth_proto);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, ip_proto, conn->ip_proto);
+ if (rc)
+ return rc;
+ if (ipv6)
+ rc = TABLE_POPULATE_KEY(key, ct, src_ip, conn->src_ip6);
+ else
+ rc = TABLE_POPULATE_KEY_IPV4(key, ct, src_ip, conn->src_ip);
+ if (rc)
+ return rc;
+ if (ipv6)
+ rc = TABLE_POPULATE_KEY(key, ct, dst_ip, conn->dst_ip6);
+ else
+ rc = TABLE_POPULATE_KEY_IPV4(key, ct, dst_ip, conn->dst_ip);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, l4_sport, conn->l4_sport);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, l4_dport, conn->l4_dport);
+ if (rc)
+ return rc;
+ return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->zone->zone));
+}
+
+int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
+ __le32 *key = NULL, *resp = NULL;
+ size_t inlen, kw, rw;
+ efx_dword_t *inbuf;
+ int rc = -ENOMEM;
+
+ /* Check table access is supported */
+ if (!efx->tc->meta_ct.hooked)
+ return -EOPNOTSUPP;
+
+ /* key/resp widths are in bits; convert to dwords for IN_LEN */
+ kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
+ rw = DIV_ROUND_UP(efx->tc->meta_ct.desc.resp_width, 32);
+ BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_INSERT_IN_DATA_LEN);
+ inlen = MC_CMD_TABLE_INSERT_IN_LEN(kw + rw);
+ if (inlen > MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2)
+ return -E2BIG;
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
+ key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
+ if (!key)
+ goto out_free;
+ resp = kcalloc(rw, sizeof(__le32), GFP_KERNEL);
+ if (!resp)
+ goto out_free;
+
+ rc = efx_mae_populate_ct_key(efx, key, kw, conn);
+ if (rc)
+ goto out_free;
+
+ rc = TABLE_POPULATE_RESP_BOOL(resp, ct, dnat, conn->dnat);
+ if (rc)
+ goto out_free;
+ /* No support in hw for IPv6 NAT; field is only 32 bits */
+ if (!ipv6)
+ rc = TABLE_POPULATE_RESP(resp, ct, nat_ip, conn->nat_ip);
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP(resp, ct, l4_natport, conn->l4_natport);
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP(resp, ct, mark, cpu_to_be32(conn->mark));
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP_U24(resp, ct, counter_id, conn->cnt->fw_id);
+ if (rc)
+ goto out_free;
+
+ MCDI_SET_DWORD(inbuf, TABLE_INSERT_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
+ MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_KEY_WIDTH,
+ efx->tc->meta_ct.desc.key_width);
+ /* MASK_WIDTH is zero as CT is a BCAM */
+ MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_RESP_WIDTH,
+ efx->tc->meta_ct.desc.resp_width);
+ memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA), key, kw * sizeof(__le32));
+ memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA) + kw * sizeof(__le32),
+ resp, rw * sizeof(__le32));
+
+ BUILD_BUG_ON(MC_CMD_TABLE_INSERT_OUT_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_INSERT, inbuf, inlen, NULL, 0, NULL);
+
+out_free:
+ kfree(resp);
+ kfree(key);
+ kfree(inbuf);
+ return rc;
+}
+
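To make the width-to-length arithmetic concrete, with hypothetical firmware-reported widths (for illustration only, the real values come from the table descriptor):

/* key_width = 480 bits  -> kw = DIV_ROUND_UP(480, 32) = 15 dwords
 * resp_width = 224 bits -> rw = DIV_ROUND_UP(224, 32) = 7 dwords
 * inlen = MC_CMD_TABLE_INSERT_IN_LEN(15 + 7): room for 22 32-bit data
 * words, key first, response packed immediately after.
 */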
+int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ __le32 *key = NULL;
+ efx_dword_t *inbuf;
+ size_t inlen, kw;
+ int rc = -ENOMEM;
+
+ /* Check table access is supported */
+ if (!efx->tc->meta_ct.hooked)
+ return -EOPNOTSUPP;
+
+ /* key width is in bits; convert to dwords for IN_LEN */
+ kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
+ BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_DELETE_IN_DATA_LEN);
+ inlen = MC_CMD_TABLE_DELETE_IN_LEN(kw);
+ if (inlen > MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2)
+ return -E2BIG;
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
+ key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
+ if (!key)
+ goto out_free;
+
+ rc = efx_mae_populate_ct_key(efx, key, kw, conn);
+ if (rc)
+ goto out_free;
+
+ MCDI_SET_DWORD(inbuf, TABLE_DELETE_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
+ MCDI_SET_WORD(inbuf, TABLE_DELETE_IN_KEY_WIDTH,
+ efx->tc->meta_ct.desc.key_width);
+ /* MASK_WIDTH is zero as CT is a BCAM */
+ /* RESP_WIDTH is zero for DELETE */
+ memcpy(MCDI_PTR(inbuf, TABLE_DELETE_IN_DATA), key, kw * sizeof(__le32));
+
+ BUILD_BUG_ON(MC_CMD_TABLE_DELETE_OUT_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DELETE, inbuf, inlen, NULL, 0, NULL);
+
+out_free:
+ kfree(key);
+ kfree(inbuf);
+ return rc;
+}
+
static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
const struct efx_tc_match *match)
{
@@ -1165,20 +1964,40 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
- EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
+ EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
+ match->value.ct_state_trk,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
+ match->value.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->value.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
- match->value.ip_firstfrag);
- EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
+ match->value.ip_firstfrag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
+ match->value.tcp_syn_fin_rst);
+ EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
+ match->mask.ct_state_trk,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
+ match->mask.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->mask.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
- match->mask.ip_firstfrag);
+ match->mask.ip_firstfrag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
+ match->mask.tcp_syn_fin_rst);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
match->value.recirc_id);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
match->mask.recirc_id);
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK,
+ match->value.ct_mark);
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK,
+ match->mask.ct_mark);
+ MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN,
+ match->value.ct_zone);
+ MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK,
+ match->mask.ct_zone);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE,
match->value.eth_proto);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK,
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 24abfe509690..e88e80574f15 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -66,6 +66,9 @@ int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
void efx_mae_counters_grant_credits(struct work_struct *work);
+int efx_mae_get_tables(struct efx_nic *efx);
+void efx_mae_free_tables(struct efx_nic *efx);
+
#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
struct mae_caps {
@@ -81,6 +84,9 @@ int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack);
int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6,
u8 ip_tos_mask, __be16 udp_sport_mask,
struct netlink_ext_ack *extack);
@@ -109,6 +115,12 @@ int efx_mae_register_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
int efx_mae_unregister_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
+int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
+ u32 prio);
+int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule);
+struct efx_tc_ct_entry; /* see tc_conntrack.h */
+int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
+int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 454e9d51a4c2..ea612c619874 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -218,14 +218,28 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
BUILD_BUG_ON(_field ## _LEN != 1); \
*(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
} while (0)
+#define MCDI_STRUCT_POPULATE_BYTE_1(_buf, _field, _name, _value) do { \
+ efx_dword_t _temp; \
+ EFX_POPULATE_DWORD_1(_temp, _name, _value); \
+ MCDI_STRUCT_SET_BYTE(_buf, _field, \
+ EFX_DWORD_FIELD(_temp, EFX_BYTE_0)); \
+ } while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
+#define MCDI_STRUCT_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 1), \
+ *MCDI_STRUCT_PTR(_buf, _field))
#define MCDI_SET_WORD(_buf, _field, _value) do { \
BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \
BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \
*(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\
} while (0)
+#define MCDI_STRUCT_SET_WORD(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 2); \
+ BUILD_BUG_ON(_field ## _OFST & 1); \
+ *(__force __le16 *)MCDI_STRUCT_PTR(_buf, _field) = cpu_to_le16(_value);\
+ } while (0)
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 4dc881159246..246657222958 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -12,9 +12,11 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include <net/geneve.h>
+#include <net/tc_act/tc_ct.h>
#include "tc.h"
#include "tc_bindings.h"
#include "tc_encap_actions.h"
+#include "tc_conntrack.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
@@ -96,6 +98,18 @@ static const struct rhashtable_params efx_tc_match_action_ht_params = {
.head_offset = offsetof(struct efx_tc_flow_rule, linkage),
};
+static const struct rhashtable_params efx_tc_lhs_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_lhs_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_lhs_rule, linkage),
+};
+
+static const struct rhashtable_params efx_tc_recirc_ht_params = {
+ .key_len = offsetof(struct efx_tc_recirc_id, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_recirc_id, linkage),
+};
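/* Editorial note, not part of this patch: with .key_offset = 0 and
 * .key_len = offsetof(struct efx_tc_recirc_id, linkage), every member
 * declared before @linkage (chain_index and net_dev) forms the hash key,
 * so member order in the struct is part of the lookup contract.
 */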
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -215,6 +229,7 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#llx",
@@ -356,6 +371,31 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) {
+ struct flow_match_ct fm;
+
+ flow_rule_match_ct(rule, &fm);
+ match->value.ct_state_trk = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
+ match->mask.ct_state_trk = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
+ match->value.ct_state_est = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
+ match->mask.ct_state_est = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
+ if (fm.mask->ct_state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported ct_state match %#x",
+ fm.mask->ct_state);
+ return -EOPNOTSUPP;
+ }
+ match->value.ct_mark = fm.key->ct_mark;
+ match->mask.ct_mark = fm.mask->ct_mark;
+ match->value.ct_zone = fm.key->ct_zone;
+ match->mask.ct_zone = fm.mask->ct_zone;
+
+ if (memchr_inv(fm.mask->ct_labels, 0, sizeof(fm.mask->ct_labels))) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on ct_label not supported");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
@@ -575,12 +615,65 @@ fail_pseudo:
return rc;
}
+static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx,
+ u32 chain_index,
+ struct net_device *net_dev)
+{
+ struct efx_tc_recirc_id *rid, *old;
+ int rc;
+
+ rid = kzalloc(sizeof(*rid), GFP_USER);
+ if (!rid)
+ return ERR_PTR(-ENOMEM);
+ rid->chain_index = chain_index;
+ /* We don't take a reference here, because it's implied - if there's
+ * a rule on the net_dev that's been offloaded to us, then the net_dev
+ * can't go away until the rule has been deoffloaded.
+ */
+ rid->net_dev = net_dev;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->recirc_ht,
+ &rid->linkage,
+ efx_tc_recirc_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(rid);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ rid = old;
+ } else {
+ rc = ida_alloc_range(&efx->tc->recirc_ida, 1, U8_MAX, GFP_USER);
+ if (rc < 0) {
+ rhashtable_remove_fast(&efx->tc->recirc_ht,
+ &rid->linkage,
+ efx_tc_recirc_ht_params);
+ kfree(rid);
+ return ERR_PTR(rc);
+ }
+ rid->fw_id = rc;
+ refcount_set(&rid->ref, 1);
+ }
+ return rid;
+}
+
+static void efx_tc_put_recirc_id(struct efx_nic *efx, struct efx_tc_recirc_id *rid)
+{
+ if (!refcount_dec_and_test(&rid->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->recirc_ht, &rid->linkage,
+ efx_tc_recirc_ht_params);
+ ida_free(&efx->tc->recirc_ida, rid->fw_id);
+ kfree(rid);
+}
+
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
efx_mae_delete_rule(efx, rule->fw_id);
/* Release entries in subsidiary tables */
efx_tc_free_action_set_list(efx, &rule->acts, true);
+ if (rule->match.rid)
+ efx_tc_put_recirc_id(efx, rule->match.rid);
if (rule->match.encap)
efx_tc_flower_release_encap_match(efx, rule->match.encap);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
@@ -650,6 +743,163 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
}
}
+/**
+ * DOC: TC conntrack sequences
+ *
+ * The MAE hardware can handle at most two rounds of action rule matching;
+ * consequently we support conntrack through the notion of a "left-hand side
+ * rule". This is a rule which typically contains only the actions "ct" and
+ * "goto chain N", and corresponds to one or more "right-hand side rules" in
+ * chain N, which typically match on +trk+est and may perform ct(nat) actions.
+ * RHS rules go in the Action Rule table as normal, but with a nonzero
+ * recirc_id (the hardware equivalent of chain_index). LHS rules may go in
+ * either the Action Rule or the Outer Rule table (the latter is preferred
+ * for performance) and set both DO_CT and a recirc_id in their response.
+ *
+ * Besides the RHS rules, there are often also similar rules matching on
+ * +trk+new which perform the ct(commit) action. These are not offloaded.
+ */
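(Editorial illustration, not part of this patch: an assumed ruleset of the
shape described above would look roughly like

	tc filter add dev $PF ingress protocol ip chain 0 flower \
		ct_state -trk action ct zone 1 pipe action goto chain 1
	tc filter add dev $PF ingress protocol ip chain 1 flower \
		ct_state +trk+est ct_zone 1 \
		action mirred egress redirect dev $REP

where the chain-0 rule is the LHS rule performing the CT lookup and
recirculation, and the chain-1 rule is an RHS rule matched with a nonzero
recirc_id in the Action Rule table.)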
+
+static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr,
+ struct efx_tc_match *match)
+{
+ const struct flow_action_entry *fa;
+ int i;
+
+ flow_action_for_each(i, fa, &fr->action) {
+ switch (fa->id) {
+ case FLOW_ACTION_GOTO:
+ return true;
+ case FLOW_ACTION_CT:
+ /* If rule is -trk, or doesn't mention trk at all, then
+ * a CT action implies a conntrack lookup (hence it's an
+ * LHS rule). If rule is +trk, then a CT action could
+ * just be ct(nat) or even ct(commit) (though the latter
+ * can't be offloaded).
+ */
+ if (!match->mask.ct_state_trk || !match->value.ct_state_trk)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct net_device *net_dev,
+ struct efx_tc_lhs_rule *rule)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_action *act = &rule->lhs_act;
+ const struct flow_action_entry *fa;
+ bool pipe = true;
+ int i;
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_ct_zone *ct_zone;
+ struct efx_tc_recirc_id *rid;
+
+ if (!pipe) {
+ /* more actions after a non-pipe action */
+ NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
+ return -EINVAL;
+ }
+ switch (fa->id) {
+ case FLOW_ACTION_GOTO:
+ if (!fa->chain_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't goto chain 0, no looping in hw");
+ return -EOPNOTSUPP;
+ }
+ rid = efx_tc_get_recirc_id(efx, fa->chain_index,
+ net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate a hardware recirculation ID for this chain_index");
+ return PTR_ERR(rid);
+ }
+ act->rid = rid;
+ if (fa->hw_stats) {
+ struct efx_tc_counter_index *cnt;
+
+ if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "hw_stats_type %u not supported (only 'delayed')",
+ fa->hw_stats);
+ return -EOPNOTSUPP;
+ }
+ cnt = efx_tc_flower_get_counter_index(efx, tc->cookie,
+ EFX_TC_COUNTER_TYPE_OR);
+ if (IS_ERR(cnt)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
+ return PTR_ERR(cnt);
+ }
+ WARN_ON(act->count); /* can't happen */
+ act->count = cnt;
+ }
+ pipe = false;
+ break;
+ case FLOW_ACTION_CT:
+ if (act->zone) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload multiple ct actions");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & (TCA_CT_ACT_COMMIT |
+ TCA_CT_ACT_FORCE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload ct commit/force");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & TCA_CT_ACT_CLEAR) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't clear ct in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & (TCA_CT_ACT_NAT |
+ TCA_CT_ACT_NAT_SRC |
+ TCA_CT_ACT_NAT_DST)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't perform NAT in LHS rule - packet isn't conntracked yet");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ fa->ct.action);
+ return -EOPNOTSUPP;
+ }
+ ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone,
+ fa->ct.flow_table);
+ if (IS_ERR(ct_zone)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to register for CT updates");
+ return PTR_ERR(ct_zone);
+ }
+ act->zone = ct_zone;
+ break;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ fa->id);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (pipe) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing goto chain in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx,
+ struct efx_tc_lhs_action *act)
+{
+ if (act->rid)
+ efx_tc_put_recirc_id(efx, act->rid);
+ if (act->zone)
+ efx_tc_ct_unregister_zone(efx, act->zone);
+ if (act->count)
+ efx_tc_flower_put_counter_index(efx, act->count);
+}
+
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
@@ -684,11 +934,40 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
match.mask.ingress_port = ~0;
if (tc->common.chain_index) {
- NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
- return -EOPNOTSUPP;
+ struct efx_tc_recirc_id *rid;
+
+ rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to allocate a hardware recirculation ID for chain_index %u",
+ tc->common.chain_index);
+ return PTR_ERR(rid);
+ }
+ match.rid = rid;
+ match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
+ /* AR table can't match on DO_CT (+trk). But a commonly used pattern is
+ * +trk+est, which is strictly implied by +est, so rewrite it to that.
+ */
+ if (match.mask.ct_state_trk && match.value.ct_state_trk &&
+ match.mask.ct_state_est && match.value.ct_state_est)
+ match.mask.ct_state_trk = 0;
+ /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
+ * match +trk-est (CT_HIT=0) despite being on an established connection.
+ * So make -est imply -tcp_syn_fin_rst match to ensure these packets
+ * still hit the software path.
+ */
+ if (match.mask.ct_state_est && !match.value.ct_state_est) {
+ if (match.value.tcp_syn_fin_rst) {
+ /* Can't offload this combination */
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ match.mask.tcp_syn_fin_rst = true;
+ }
+
flow_action_for_each(i, fa, &fr->action) {
switch (fa->id) {
case FLOW_ACTION_REDIRECT:
@@ -705,12 +984,13 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (!found) { /* We don't care. */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter that doesn't egdev us\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
if (rc)
- return rc;
+ goto release;
if (efx_tc_match_is_encap(&match.mask)) {
enum efx_encap_type type;
@@ -719,7 +999,8 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (type == EFX_ENCAP_TYPE_NONE) {
NL_SET_ERR_MSG_MOD(extack,
"Egress encap match on unsupported tunnel device");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rc = efx_mae_check_encap_type_supported(efx, type);
@@ -727,25 +1008,26 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
NL_SET_ERR_MSG_FMT_MOD(extack,
"Firmware reports no support for %s encap match",
efx_tc_encap_type_name(type));
- return rc;
+ goto release;
}
rc = efx_tc_flower_record_encap_match(efx, &match, type,
EFX_TC_EM_DIRECT, 0, 0,
extack);
if (rc)
- return rc;
+ goto release;
} else {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule) {
rc = -ENOMEM;
- goto out_free;
+ goto release;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
@@ -757,7 +1039,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
rc = -EEXIST;
- goto out_free;
+ goto release;
}
act = kzalloc(sizeof(*act), GFP_USER);
@@ -915,21 +1197,95 @@ release:
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
+ if (match.rid)
+ efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
- rhashtable_remove_fast(&efx->tc->match_action_ht,
- &rule->linkage,
- efx_tc_match_action_ht_params);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
-out_free:
kfree(rule);
if (match.encap)
efx_tc_flower_release_encap_match(efx, match.encap);
return rc;
}
+static int efx_tc_flower_replace_lhs(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct efx_tc_match *match,
+ struct efx_rep *efv,
+ struct net_device *net_dev)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *rule, *old;
+ int rc;
+
+ if (tc->common.chain_index) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0");
+ return -EOPNOTSUPP;
+ }
+
+ if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+ return -EOPNOTSUPP;
+ }
+ /* LHS rules are always -trk, so we don't need to match on that */
+ match->mask.ct_state_trk = 0;
+ match->value.ct_state_trk = 0;
+
+ rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+ &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ /* See note in efx_tc_flower_replace() regarding passed net_dev
+ * (used for efx_tc_get_recirc_id()).
+ */
+ rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, efx->net_dev, rule);
+ if (rc)
+ goto release;
+
+ rule->match = *match;
+
+ rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+ goto release;
+ }
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed lhs rule (cookie %lx)\n",
+ tc->cookie);
+ return 0;
+
+release:
+ efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ kfree(rule);
+ return rc;
+}
+
static int efx_tc_flower_replace(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc,
@@ -985,19 +1341,69 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return -EOPNOTSUPP;
}
+ if (efx_tc_rule_is_lhs_rule(fr, &match))
+ return efx_tc_flower_replace_lhs(efx, tc, fr, &match, efv,
+ net_dev);
+
+ /* chain_index 0 is always recirc_id 0 (and does not appear in recirc_ht).
+ * Conveniently, match.rid == NULL and match.value.recirc_id == 0 owing
+ * to the initial memset(), so we don't need to do anything in that case.
+ */
if (tc->common.chain_index) {
- NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
- return -EOPNOTSUPP;
+ struct efx_tc_recirc_id *rid;
+
+ /* Note regarding passed net_dev:
+ * VFreps and PF can share chain namespace, as they have
+ * distinct ingress_mports. So we don't need to burn an
+ * extra recirc_id if both use the same chain_index.
+ * (Strictly speaking, we could give each VFrep its own
+ * recirc_id namespace that doesn't take IDs away from the
+ * PF, but that would require a bunch of additional IDAs -
+ * one for each representor - and that's not likely to be
+ * the main cause of recirc_id exhaustion anyway.)
+ */
+ rid = efx_tc_get_recirc_id(efx, tc->common.chain_index,
+ efx->net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to allocate a hardware recirculation ID for chain_index %u",
+ tc->common.chain_index);
+ return PTR_ERR(rid);
+ }
+ match.rid = rid;
+ match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
+ /* AR table can't match on DO_CT (+trk). But a commonly used pattern is
+ * +trk+est, which is strictly implied by +est, so rewrite it to that.
+ */
+ if (match.mask.ct_state_trk && match.value.ct_state_trk &&
+ match.mask.ct_state_est && match.value.ct_state_est)
+ match.mask.ct_state_trk = 0;
+ /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
+ * match +trk-est (CT_HIT=0) despite being on an established connection.
+ * So make -est imply -tcp_syn_fin_rst match to ensure these packets
+ * still hit the software path.
+ */
+ if (match.mask.ct_state_est && !match.value.ct_state_est) {
+ if (match.value.tcp_syn_fin_rst) {
+ /* Can't offload this combination */
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ match.mask.tcp_syn_fin_rst = true;
+ }
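/* Editorial worked example, not part of this patch: a filter matching
 * ct_state +trk+est arrives with trk and est set in both value and mask;
 * clearing mask.ct_state_trk leaves a pure +est match, which the AR table
 * expresses through CT_HIT alone. Conversely a -est match gains
 * mask.tcp_syn_fin_rst = true, so SYN/FIN/RST packets (whose CT lookup is
 * inhibited and which therefore always look -est) fall through to the
 * software path instead of matching here.
 */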
+
rc = efx_mae_match_check_caps(efx, &match.mask, extack);
if (rc)
- return rc;
+ goto release;
rule = kzalloc(sizeof(*rule), GFP_USER);
- if (!rule)
- return -ENOMEM;
+ if (!rule) {
+ rc = -ENOMEM;
+ goto release;
+ }
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
@@ -1007,8 +1413,8 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
- kfree(rule);
- return -EEXIST;
+ rc = -EEXIST;
+ goto release;
}
/* Parse actions */
@@ -1326,12 +1732,15 @@ release:
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
+ if (match.rid)
+ efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
- rhashtable_remove_fast(&efx->tc->match_action_ht,
- &rule->linkage,
- efx_tc_match_action_ht_params);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
kfree(rule);
@@ -1343,8 +1752,26 @@ static int efx_tc_flower_destroy(struct efx_nic *efx,
struct flow_cls_offload *tc)
{
struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *lhs_rule;
struct efx_tc_flow_rule *rule;
+ lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie,
+ efx_tc_lhs_rule_ht_params);
+ if (lhs_rule) {
+ /* Remove it from HW */
+ efx_mae_remove_lhs_rule(efx, lhs_rule);
+ /* Delete it from SW */
+ efx_tc_flower_release_lhs_actions(efx, &lhs_rule->lhs_act);
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &lhs_rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (lhs_rule->match.encap)
+ efx_tc_flower_release_encap_match(efx, lhs_rule->match.encap);
+ netif_dbg(efx, drv, efx->net_dev, "Removed (lhs) filter %lx\n",
+ lhs_rule->cookie);
+ kfree(lhs_rule);
+ return 0;
+ }
+
rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
efx_tc_match_action_ht_params);
if (!rule) {
@@ -1660,11 +2087,17 @@ int efx_init_tc(struct efx_nic *efx)
rc = efx_tc_configure_fallback_acts_reps(efx);
if (rc)
return rc;
+ rc = efx_mae_get_tables(efx);
+ if (rc)
+ return rc;
efx->tc->up = true;
rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
if (rc)
- return rc;
+ goto out_free;
return 0;
+out_free:
+ efx_mae_free_tables(efx);
+ return rc;
}
void efx_fini_tc(struct efx_nic *efx)
@@ -1680,6 +2113,7 @@ void efx_fini_tc(struct efx_nic *efx)
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
efx->tc->up = false;
+ efx_mae_free_tables(efx);
}
/* At teardown time, all TC filter rules (and thus all resources they created)
@@ -1694,6 +2128,34 @@ static void efx_tc_encap_match_free(void *ptr, void *__unused)
kfree(encap);
}
+static void efx_tc_recirc_free(void *ptr, void *arg)
+{
+ struct efx_tc_recirc_id *rid = ptr;
+ struct efx_nic *efx = arg;
+
+ WARN_ON(refcount_read(&rid->ref));
+ ida_free(&efx->tc->recirc_ida, rid->fw_id);
+ kfree(rid);
+}
+
+static void efx_tc_lhs_free(void *ptr, void *arg)
+{
+ struct efx_tc_lhs_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc lhs_rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ if (rule->lhs_act.zone)
+ efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone);
+ if (rule->lhs_act.count)
+ efx_tc_flower_put_counter_index(efx, rule->lhs_act.count);
+ efx_mae_remove_lhs_rule(efx, rule);
+
+ kfree(rule);
+}
+
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
@@ -1740,6 +2202,16 @@ int efx_init_struct_tc(struct efx_nic *efx)
rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
if (rc < 0)
goto fail_match_action_ht;
+ rc = rhashtable_init(&efx->tc->lhs_rule_ht, &efx_tc_lhs_rule_ht_params);
+ if (rc < 0)
+ goto fail_lhs_rule_ht;
+ rc = efx_tc_init_conntrack(efx);
+ if (rc < 0)
+ goto fail_conntrack;
+ rc = rhashtable_init(&efx->tc->recirc_ht, &efx_tc_recirc_ht_params);
+ if (rc < 0)
+ goto fail_recirc_ht;
+ ida_init(&efx->tc->recirc_ida);
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
@@ -1752,6 +2224,12 @@ int efx_init_struct_tc(struct efx_nic *efx)
efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
return 0;
+fail_recirc_ht:
+ efx_tc_destroy_conntrack(efx);
+fail_conntrack:
+ rhashtable_destroy(&efx->tc->lhs_rule_ht);
+fail_lhs_rule_ht:
+ rhashtable_destroy(&efx->tc->match_action_ht);
fail_match_action_ht:
rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
@@ -1781,10 +2259,15 @@ void efx_fini_struct_tc(struct efx_nic *efx)
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->lhs_rule_ht, efx_tc_lhs_free, efx);
rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
efx);
rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
efx_tc_encap_match_free, NULL);
+ efx_tc_fini_conntrack(efx);
+ rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx);
+ WARN_ON(!ida_is_empty(&efx->tc->recirc_ida));
+ ida_destroy(&efx->tc->recirc_ida);
efx_tc_fini_counters(efx);
efx_tc_fini_encap_actions(efx);
mutex_unlock(&efx->tc->mutex);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 1549c3df43bb..40d2c803fca8 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -18,12 +18,10 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
-#ifdef CONFIG_IPV6
static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
{
return !memchr_inv(addr, 0xff, sizeof(*addr));
}
-#endif
struct efx_tc_encap_action; /* see tc_encap_actions.h */
@@ -47,7 +45,7 @@ struct efx_tc_action_set {
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
- u8 recirc_id;
+ u8 recirc_id; /* mapped from (u32) TC chain_index to smaller space */
/* L2 (inner when encap) */
__be16 eth_proto;
__be16 vlan_tci[2], vlan_proto[2];
@@ -62,6 +60,7 @@ struct efx_tc_match_fields {
/* L4 */
__be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */
__be16 tcp_flags;
+ bool tcp_syn_fin_rst; /* true if ANY of SYN/FIN/RST are set */
/* Encap. The following are *outer* fields. Note that there are no
* outer eth (L2) fields; this is because TC doesn't have them.
*/
@@ -70,6 +69,10 @@ struct efx_tc_match_fields {
u8 enc_ip_tos, enc_ip_ttl;
__be16 enc_sport, enc_dport;
__be32 enc_keyid; /* e.g. VNI, VSID */
+ /* Conntrack. */
+ u16 ct_state_trk:1, ct_state_est:1;
+ u32 ct_mark;
+ u16 ct_zone;
};
static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
@@ -117,10 +120,19 @@ struct efx_tc_encap_match {
struct efx_tc_encap_match *pseudo; /* Referenced pseudo EM if needed */
};
+struct efx_tc_recirc_id {
+ u32 chain_index;
+ struct net_device *net_dev;
+ struct rhash_head linkage;
+ refcount_t ref;
+ u8 fw_id; /* index allocated for use in the MAE */
+};
+
struct efx_tc_match {
struct efx_tc_match_fields value;
struct efx_tc_match_fields mask;
struct efx_tc_encap_match *encap;
+ struct efx_tc_recirc_id *rid;
};
struct efx_tc_action_set_list {
@@ -128,6 +140,12 @@ struct efx_tc_action_set_list {
u32 fw_id;
};
+struct efx_tc_lhs_action {
+ struct efx_tc_recirc_id *rid;
+ struct efx_tc_ct_zone *zone;
+ struct efx_tc_counter_index *count;
+};
+
struct efx_tc_flow_rule {
unsigned long cookie;
struct rhash_head linkage;
@@ -137,12 +155,62 @@ struct efx_tc_flow_rule {
u32 fw_id;
};
+struct efx_tc_lhs_rule {
+ unsigned long cookie;
+ struct efx_tc_match match;
+ struct efx_tc_lhs_action lhs_act;
+ struct rhash_head linkage;
+ u32 fw_id;
+};
+
enum efx_tc_rule_prios {
EFX_TC_PRIO_TC, /* Rule inserted by TC */
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
+struct efx_tc_table_field_fmt {
+ u16 field_id;
+ u16 lbn;
+ u16 width;
+ u8 masking;
+ u8 scheme;
+};
+
+struct efx_tc_table_desc {
+ u16 type;
+ u16 key_width;
+ u16 resp_width;
+ u16 n_keys;
+ u16 n_resps;
+ u16 n_prios;
+ u8 flags;
+ u8 scheme;
+ struct efx_tc_table_field_fmt *keys;
+ struct efx_tc_table_field_fmt *resps;
+};
+
+struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */
+ struct efx_tc_table_desc desc;
+ bool hooked;
+ struct { /* indices of named fields within @desc.keys */
+ u8 eth_proto_idx;
+ u8 ip_proto_idx;
+ u8 src_ip_idx; /* either v4 or v6 */
+ u8 dst_ip_idx;
+ u8 l4_sport_idx;
+ u8 l4_dport_idx;
+ u8 zone_idx; /* for TABLE_FIELD_ID_DOMAIN */
+ } keys;
+ struct { /* indices of named fields within @desc.resps */
+ u8 dnat_idx;
+ u8 nat_ip_idx;
+ u8 l4_natport_idx;
+ u8 mark_idx;
+ u8 counter_id_idx;
+ } resps;
+};
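/* Editorial note, not part of this patch: the *_idx members above are
 * presumably consumed by the TABLE_POPULATE_*() helpers in mae.c, which
 * translate a named field into the lbn/width that firmware reported in
 * @desc, so key/response layouts are discovered at runtime rather than
 * hard-coded.
 */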
+
/**
* struct efx_tc_state - control plane data for TC offload
*
@@ -154,7 +222,13 @@ enum efx_tc_rule_prios {
* @encap_ht: Hashtable of TC encap actions
* @encap_match_ht: Hashtable of TC encap matches
* @match_action_ht: Hashtable of TC match-action rules
+ * @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules
+ * @ct_zone_ht: Hashtable of TC conntrack flowtable bindings
+ * @ct_ht: Hashtable of TC conntrack flow entries
* @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder)
+ * @recirc_ht: Hashtable of recirculation ID mappings (&struct efx_tc_recirc_id)
+ * @recirc_ida: Recirculation ID allocator
+ * @meta_ct: MAE table layout for conntrack table
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
@@ -185,7 +259,13 @@ struct efx_tc_state {
struct rhashtable encap_ht;
struct rhashtable encap_match_ht;
struct rhashtable match_action_ht;
+ struct rhashtable lhs_rule_ht;
+ struct rhashtable ct_zone_ht;
+ struct rhashtable ct_ht;
struct rhashtable neigh_ht;
+ struct rhashtable recirc_ht;
+ struct ida recirc_ida;
+ struct efx_tc_table_ct meta_ct;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
bool flush_counters;
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
new file mode 100644
index 000000000000..54ed288543d0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_conntrack.h"
+#include "tc.h"
+#include "mae.h"
+
+static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
+ .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_zone, linkage),
+};
+
+static const struct rhashtable_params efx_tc_ct_ht_params = {
+ .key_len = offsetof(struct efx_tc_ct_entry, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_entry, linkage),
+};
+
+static void efx_tc_ct_zone_free(void *ptr, void *arg)
+{
+ struct efx_tc_ct_zone *zone = ptr;
+ struct efx_nic *efx = zone->efx;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc ct_zone %u still present at teardown, removing\n",
+ zone->zone);
+
+ nf_flow_table_offload_del_cb(zone->nf_ft, efx_tc_flow_block, zone);
+ kfree(zone);
+}
+
+static void efx_tc_ct_free(void *ptr, void *arg)
+{
+ struct efx_tc_ct_entry *conn = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc ct_entry %lx still present at teardown\n",
+ conn->cookie);
+
+ /* We can release the counter, but we can't remove the CT itself
+ * from hardware because the table meta is already gone.
+ */
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ kfree(conn);
+}
+
+int efx_tc_init_conntrack(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params);
+ if (rc < 0)
+ goto fail_ct_zone_ht;
+ rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params);
+ if (rc < 0)
+ goto fail_ct_ht;
+ return 0;
+fail_ct_ht:
+ rhashtable_destroy(&efx->tc->ct_zone_ht);
+fail_ct_zone_ht:
+ return rc;
+}
+
+/* Only call this in init-failure teardown.
+ * Normal exit should use efx_tc_fini_conntrack() instead, as the tables
+ * may still contain entries.
+ */
+void efx_tc_destroy_conntrack(struct efx_nic *efx)
+{
+ rhashtable_destroy(&efx->tc->ct_ht);
+ rhashtable_destroy(&efx->tc->ct_zone_ht);
+}
+
+void efx_tc_fini_conntrack(struct efx_nic *efx)
+{
+ rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL);
+ rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx);
+}
+
+#define EFX_NF_TCP_FLAG(flg) cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16)
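/* Editorial note, not part of this patch: TCP_FLAG_* in <linux/tcp.h> are
 * __be32 constants aligned with word 3 of the TCP header, so after
 * be32_to_cpu() the flag bits sit in the upper 16 bits. For example
 * TCP_FLAG_SYN == htonl(0x00020000), hence EFX_NF_TCP_FLAG(SYN) ==
 * htons(0x0002), matching the __be16 flags representation that
 * flow_rule_match_tcp() yields.
 */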
+
+static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr,
+ struct efx_tc_ct_entry *conn)
+{
+ struct flow_dissector *dissector = fr->match.dissector;
+ unsigned char ipv = 0;
+ bool tcp = false;
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(fr, &fm);
+ if (IS_ALL_ONES(fm.mask->addr_type))
+ switch (fm.key->addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ ipv = 4;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ ipv = 6;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!ipv) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing ipv specification\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_META))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack keys %#llx\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->n_proto)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack eth_proto is not exact-match; mask %04x\n",
+ ntohs(fm.mask->n_proto));
+ return -EOPNOTSUPP;
+ }
+ conn->eth_proto = fm.key->n_proto;
+ if (conn->eth_proto != (ipv == 4 ? htons(ETH_P_IP)
+ : htons(ETH_P_IPV6))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack eth_proto is not IPv%u, is %04x\n",
+ ipv, ntohs(conn->eth_proto));
+ return -EOPNOTSUPP;
+ }
+ if (!IS_ALL_ONES(fm.mask->ip_proto)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ip_proto is not exact-match; mask %02x\n",
+ fm.mask->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ conn->ip_proto = fm.key->ip_proto;
+ switch (conn->ip_proto) {
+ case IPPROTO_TCP:
+ tcp = true;
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ip_proto not TCP or UDP, is %02x\n",
+ conn->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing eth_proto, ip_proto\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs fm;
+
+ flow_rule_match_ipv4_addrs(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv4.src is not exact-match; mask %08x\n",
+ ntohl(fm.mask->src));
+ return -EOPNOTSUPP;
+ }
+ conn->src_ip = fm.key->src;
+ if (!IS_ALL_ONES(fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv4.dst is not exact-match; mask %08x\n",
+ ntohl(fm.mask->dst));
+ return -EOPNOTSUPP;
+ }
+ conn->dst_ip = fm.key->dst;
+ } else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs fm;
+
+ flow_rule_match_ipv6_addrs(fr, &fm);
+ if (!efx_ipv6_addr_all_ones(&fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv6.src is not exact-match; mask %pI6\n",
+ &fm.mask->src);
+ return -EOPNOTSUPP;
+ }
+ conn->src_ip6 = fm.key->src;
+ if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv6.dst is not exact-match; mask %pI6\n",
+ &fm.mask->dst);
+ return -EOPNOTSUPP;
+ }
+ conn->dst_ip6 = fm.key->dst;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing IPv%u addrs\n", ipv);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports fm;
+
+ flow_rule_match_ports(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ports.src is not exact-match; mask %04x\n",
+ ntohs(fm.mask->src));
+ return -EOPNOTSUPP;
+ }
+ conn->l4_sport = fm.key->src;
+ if (!IS_ALL_ONES(fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ports.dst is not exact-match; mask %04x\n",
+ ntohs(fm.mask->dst));
+ return -EOPNOTSUPP;
+ }
+ conn->l4_dport = fm.key->dst;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "Conntrack missing L4 ports\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) {
+ __be16 tcp_interesting_flags;
+ struct flow_match_tcp fm;
+
+ if (!tcp) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack matching on TCP keys but ipproto is not tcp\n");
+ return -EOPNOTSUPP;
+ }
+ flow_rule_match_tcp(fr, &fm);
+ tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) |
+ EFX_NF_TCP_FLAG(RST) |
+ EFX_NF_TCP_FLAG(FIN);
+ /* If any of the tcp_interesting_flags is set, we always
+ * inhibit CT lookup in LHS (so SW can update CT table).
+ */
+ if (fm.key->flags & tcp_interesting_flags) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack tcp.flags %04x/%04x\n",
+ ntohs(fm.key->flags), ntohs(fm.mask->flags));
+ return -EOPNOTSUPP;
+ }
+ /* Other TCP flags cannot be filtered at CT */
+ if (fm.mask->flags & ~tcp_interesting_flags) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack tcp.flags %04x/%04x\n",
+ ntohs(fm.key->flags), ntohs(fm.mask->flags));
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct efx_tc_ct_entry *conn, *old;
+ struct efx_nic *efx = ct_zone->efx;
+ const struct flow_action_entry *fa;
+ struct efx_tc_counter *cnt;
+ int rc, i;
+
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ conn = kzalloc(sizeof(*conn), GFP_USER);
+ if (!conn)
+ return -ENOMEM;
+ conn->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
+ &conn->linkage,
+ efx_tc_ct_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded conntrack (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ goto release;
+ }
+
+ /* Parse match */
+ conn->zone = ct_zone;
+ rc = efx_tc_ct_parse_match(efx, fr, conn);
+ if (rc)
+ goto release;
+
+ /* Parse actions */
+ flow_action_for_each(i, fa, &fr->action) {
+ switch (fa->id) {
+ case FLOW_ACTION_CT_METADATA:
+ conn->mark = fa->ct_metadata.mark;
+ if (memchr_inv(fa->ct_metadata.labels, 0, sizeof(fa->ct_metadata.labels))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Setting CT label not supported\n");
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ break;
+ default:
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unhandled action %u for conntrack\n", fa->id);
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ }
+
+ /* fill in defaults for unmangled values */
+ conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
+ conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
+
+ cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT);
+ if (IS_ERR(cnt)) {
+ rc = PTR_ERR(cnt);
+ goto release;
+ }
+ conn->cnt = cnt;
+
+ rc = efx_mae_insert_ct(efx, conn);
+ if (rc) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Failed to insert conntrack, %d\n", rc);
+ goto release;
+ }
+ mutex_lock(&ct_zone->mutex);
+ list_add_tail(&conn->list, &ct_zone->cts);
+ mutex_unlock(&ct_zone->mutex);
+ return 0;
+release:
+ if (conn->cnt)
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
+ efx_tc_ct_ht_params);
+ kfree(conn);
+ return rc;
+}
+
+/* Caller must follow with efx_tc_ct_remove_finish() after RCU grace period! */
+static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ int rc;
+
+ /* Remove it from HW */
+ rc = efx_mae_remove_ct(efx, conn);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
+ efx_tc_ct_ht_params);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to remove conntrack %lx from hw, rc %d\n",
+ conn->cookie, rc);
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n",
+ conn->cookie);
+ }
+}
+
+static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ /* Remove the related CT counter. This is deferred until after the
+ * conn object has been removed and an RCU grace period has elapsed,
+ * which protects the counter from use-after-free inside
+ * efx_tc_ct_stats().
+ */
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ kfree(conn);
+}
+
+static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found to remove\n", tc->cookie);
+ return -ENOENT;
+ }
+
+ mutex_lock(&ct_zone->mutex);
+ list_del(&conn->list);
+ efx_tc_ct_remove(efx, conn);
+ mutex_unlock(&ct_zone->mutex);
+ synchronize_rcu();
+ efx_tc_ct_remove_finish(efx, conn);
+ return 0;
+}
+
+static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+ struct efx_tc_counter *cnt;
+
+ rcu_read_lock();
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found for stats\n", tc->cookie);
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ cnt = conn->cnt;
+ spin_lock_bh(&cnt->lock);
+ /* Report only last use */
+ flow_stats_update(&tc->stats, 0, 0, 0, cnt->touched,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ spin_unlock_bh(&cnt->lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *tcb = type_data;
+ struct efx_tc_ct_zone *ct_zone = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (tcb->command) {
+ case FLOW_CLS_REPLACE:
+ return efx_tc_ct_replace(ct_zone, tcb);
+ case FLOW_CLS_DESTROY:
+ return efx_tc_ct_destroy(ct_zone, tcb);
+ case FLOW_CLS_STATS:
+ return efx_tc_ct_stats(ct_zone, tcb);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
+ struct nf_flowtable *ct_ft)
+{
+ struct efx_tc_ct_zone *ct_zone, *old;
+ int rc;
+
+ ct_zone = kzalloc(sizeof(*ct_zone), GFP_USER);
+ if (!ct_zone)
+ return ERR_PTR(-ENOMEM);
+ ct_zone->zone = zone;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_zone_ht,
+ &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ct_zone);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ WARN_ON_ONCE(old->nf_ft != ct_ft);
+ netif_dbg(efx, drv, efx->net_dev,
+ "Found existing ct_zone for %u\n", zone);
+ return old;
+ }
+ ct_zone->nf_ft = ct_ft;
+ ct_zone->efx = efx;
+ INIT_LIST_HEAD(&ct_zone->cts);
+ mutex_init(&ct_zone->mutex);
+ rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone);
+ netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u, rc %d\n",
+ zone, rc);
+ if (rc < 0)
+ goto fail;
+ refcount_set(&ct_zone->ref, 1);
+ return ct_zone;
+fail:
+ rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ kfree(ct_zone);
+ return ERR_PTR(rc);
+}
+
+void efx_tc_ct_unregister_zone(struct efx_nic *efx,
+ struct efx_tc_ct_zone *ct_zone)
+{
+ struct efx_tc_ct_entry *conn, *next;
+
+ if (!refcount_dec_and_test(&ct_zone->ref))
+ return; /* still in use */
+ nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone);
+ rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ mutex_lock(&ct_zone->mutex);
+ list_for_each_entry(conn, &ct_zone->cts, list)
+ efx_tc_ct_remove(efx, conn);
+ synchronize_rcu();
+ /* need to use _safe because efx_tc_ct_remove_finish() frees conn */
+ list_for_each_entry_safe(conn, next, &ct_zone->cts, list)
+ efx_tc_ct_remove_finish(efx, conn);
+ mutex_unlock(&ct_zone->mutex);
+ mutex_destroy(&ct_zone->mutex);
+ netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u\n",
+ ct_zone->zone);
+ kfree(ct_zone);
+}
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.h b/drivers/net/ethernet/sfc/tc_conntrack.h
new file mode 100644
index 000000000000..e75c8eb1965d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_conntrack.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_CONNTRACK_H
+#define EFX_TC_CONNTRACK_H
+#include "net_driver.h"
+
+#if IS_ENABLED(CONFIG_SFC_SRIOV)
+#include <linux/refcount.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct efx_tc_ct_zone {
+ u16 zone;
+ struct rhash_head linkage;
+ refcount_t ref;
+ struct nf_flowtable *nf_ft;
+ struct efx_nic *efx;
+ struct mutex mutex; /* protects cts list */
+ struct list_head cts; /* list of efx_tc_ct_entry in this zone */
+};
+
+/* create/teardown hashtables; destroy is only for unwinding a failed init */
+int efx_tc_init_conntrack(struct efx_nic *efx);
+void efx_tc_destroy_conntrack(struct efx_nic *efx);
+void efx_tc_fini_conntrack(struct efx_nic *efx);
+
+struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
+ struct nf_flowtable *ct_ft);
+void efx_tc_ct_unregister_zone(struct efx_nic *efx,
+ struct efx_tc_ct_zone *ct_zone);
+
+struct efx_tc_ct_entry {
+ unsigned long cookie;
+ struct rhash_head linkage;
+ __be16 eth_proto;
+ u8 ip_proto;
+ bool dnat;
+ __be32 src_ip, dst_ip, nat_ip;
+ struct in6_addr src_ip6, dst_ip6;
+ __be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */
+ struct efx_tc_ct_zone *zone;
+ u32 mark;
+ struct efx_tc_counter *cnt;
+ struct list_head list; /* entry on zone->cts */
+};
+
+#endif /* CONFIG_SFC_SRIOV */
+#endif /* EFX_TC_CONNTRACK_H */
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index 979f49058a0c..0fafb47ea082 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -129,8 +129,8 @@ static void efx_tc_counter_work(struct work_struct *work)
/* Counter allocation */
-static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
- int type)
+struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
+ int type)
{
struct efx_tc_counter *cnt;
int rc, rc2;
@@ -169,8 +169,8 @@ fail1:
return ERR_PTR(rc > 0 ? -EIO : rc);
}
-static void efx_tc_flower_release_counter(struct efx_nic *efx,
- struct efx_tc_counter *cnt)
+void efx_tc_flower_release_counter(struct efx_nic *efx,
+ struct efx_tc_counter *cnt)
{
int rc;
diff --git a/drivers/net/ethernet/sfc/tc_counters.h b/drivers/net/ethernet/sfc/tc_counters.h
index 41e57f34b763..f18d71c13600 100644
--- a/drivers/net/ethernet/sfc/tc_counters.h
+++ b/drivers/net/ethernet/sfc/tc_counters.h
@@ -49,6 +49,10 @@ int efx_tc_init_counters(struct efx_nic *efx);
void efx_tc_destroy_counters(struct efx_nic *efx);
void efx_tc_fini_counters(struct efx_nic *efx);
+struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
+ int type);
+void efx_tc_flower_release_counter(struct efx_nic *efx,
+ struct efx_tc_counter *cnt);
struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
struct efx_nic *efx, unsigned long cookie,
enum efx_tc_counter_type type);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 8779645a54be..819d1db34643 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -26,7 +26,7 @@ static int txgbe_swnodes_register(struct txgbe *txgbe)
struct software_node *swnodes;
u32 id;
- id = (pdev->bus->number << 8) | pdev->devfn;
+ id = pci_dev_id(pdev);
snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id);
snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id);
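(Editorial aside, not part of this patch: pci_dev_id() is the existing
<linux/pci.h> helper that folds bus number and devfn exactly as the
open-coded expression did; its upstream definition is)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}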
@@ -140,7 +140,7 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
mii_bus->phy_mask = ~0;
mii_bus->priv = wx;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret)
@@ -459,7 +459,7 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
return -ENOMEM;
gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x",
- (wx->pdev->bus->number << 8) | wx->pdev->devfn);
+ pci_dev_id(wx->pdev));
if (!gc->label)
return -ENOMEM;
@@ -503,7 +503,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
struct clk *clk;
snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
- (pdev->bus->number << 8) | pdev->devfn);
+ pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -566,7 +566,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
info.name = "i2c_designware";
- info.id = (pdev->bus->number << 8) | pdev->devfn;
+ info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
info.num_res = 1;
@@ -588,7 +588,7 @@ static int txgbe_sfp_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]);
info.name = "sfp";
- info.id = (pdev->bus->number << 8) | pdev->devfn;
+ info.id = pci_dev_id(pdev);
sfp_dev = platform_device_register_full(&info);
if (IS_ERR(sfp_dev))
return PTR_ERR(sfp_dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bc50fc3f6913..8243563a40f0 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -149,7 +149,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
struct team_option_inst *opt_inst;
unsigned int array_size;
unsigned int i;
- int err;
array_size = option->array_size;
if (!array_size)
@@ -165,11 +164,8 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
opt_inst->changed = true;
opt_inst->removed = false;
list_add_tail(&opt_inst->list, &team->option_inst_list);
- if (option->init) {
- err = option->init(team, &opt_inst->info);
- if (err)
- return err;
- }
+ if (option->init)
+ option->init(team, &opt_inst->info);
}
return 0;
@@ -362,7 +358,9 @@ static int team_option_get(struct team *team,
{
if (!opt_inst->option->getter)
return -EOPNOTSUPP;
- return opt_inst->option->getter(team, ctx);
+
+ opt_inst->option->getter(team, ctx);
+ return 0;
}
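(Editorial note, not part of this patch: this assumes the getter member of
struct team_option in <linux/if_team.h> is now declared as

	void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);

so getters can no longer report failure, and team_option_get() errors only
when no getter exists.)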
static int team_option_set(struct team *team,
@@ -1377,10 +1375,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
* Net device ops
*****************/
-static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
ctx->data.str_val = team->mode->kind;
- return 0;
}
static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
@@ -1388,11 +1385,10 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
-static int team_notify_peers_count_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_notify_peers_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.count;
- return 0;
}
static int team_notify_peers_count_set(struct team *team,
@@ -1402,11 +1398,10 @@ static int team_notify_peers_count_set(struct team *team,
return 0;
}
-static int team_notify_peers_interval_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_notify_peers_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.interval;
- return 0;
}
static int team_notify_peers_interval_set(struct team *team,
@@ -1416,11 +1411,10 @@ static int team_notify_peers_interval_set(struct team *team,
return 0;
}
-static int team_mcast_rejoin_count_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_mcast_rejoin_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.count;
- return 0;
}
static int team_mcast_rejoin_count_set(struct team *team,
@@ -1430,11 +1424,10 @@ static int team_mcast_rejoin_count_set(struct team *team,
return 0;
}
-static int team_mcast_rejoin_interval_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_mcast_rejoin_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.interval;
- return 0;
}
static int team_mcast_rejoin_interval_set(struct team *team,
@@ -1444,13 +1437,12 @@ static int team_mcast_rejoin_interval_set(struct team *team,
return 0;
}
-static int team_port_en_option_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_port_en_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
ctx->data.bool_val = team_port_enabled(port);
- return 0;
}
static int team_port_en_option_set(struct team *team,
@@ -1465,13 +1457,12 @@ static int team_port_en_option_set(struct team *team,
return 0;
}
-static int team_user_linkup_option_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_user_linkup_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
ctx->data.bool_val = port->user.linkup;
- return 0;
}
static void __team_carrier_check(struct team *team);
@@ -1487,13 +1478,12 @@ static int team_user_linkup_option_set(struct team *team,
return 0;
}
-static int team_user_linkup_en_option_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_user_linkup_en_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
ctx->data.bool_val = port->user.linkup_enabled;
- return 0;
}
static int team_user_linkup_en_option_set(struct team *team,
@@ -1507,13 +1497,12 @@ static int team_user_linkup_en_option_set(struct team *team,
return 0;
}
-static int team_priority_option_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_priority_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
ctx->data.s32_val = port->priority;
- return 0;
}
static int team_priority_option_set(struct team *team,
@@ -1529,13 +1518,12 @@ static int team_priority_option_set(struct team *team,
return 0;
}
-static int team_queue_id_option_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void team_queue_id_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
ctx->data.u32_val = port->queue_id;
- return 0;
}
static int team_queue_id_option_set(struct team *team,
@@ -2892,7 +2880,7 @@ static int __init team_nl_init(void)
return genl_register_family(&team_nl_family);
}
-static void team_nl_fini(void)
+static void __exit team_nl_fini(void)
{
genl_unregister_family(&team_nl_family);
}
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index 3147a4fdf8d9..e0f599e2a51d 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -57,14 +57,13 @@ static void ab_port_leave(struct team *team, struct team_port *port)
}
}
-static int ab_active_port_init(struct team *team,
- struct team_option_inst_info *info)
+static void ab_active_port_init(struct team *team,
+ struct team_option_inst_info *info)
{
ab_priv(team)->ap_opt_inst_info = info;
- return 0;
}
-static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct team_port *active_port;
@@ -74,7 +73,6 @@ static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
ctx->data.u32_val = active_port->dev->ifindex;
else
ctx->data.u32_val = 0;
- return 0;
}
static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index 313a3e2d68bf..61d7d79f0c36 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 18d99fda997c..00f8989c29c0 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -30,8 +30,6 @@ static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
struct lb_priv;
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
- struct lb_priv *,
- struct sk_buff *,
unsigned char);
#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
@@ -118,8 +116,6 @@ static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
/* Basic tx selection based solely by hash */
static struct team_port *lb_hash_select_tx_port(struct team *team,
- struct lb_priv *lb_priv,
- struct sk_buff *skb,
unsigned char hash)
{
int port_index = team_num_to_port_index(team, hash);
@@ -129,17 +125,16 @@ static struct team_port *lb_hash_select_tx_port(struct team *team,
/* Hash to port mapping select tx port */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
- struct lb_priv *lb_priv,
- struct sk_buff *skb,
unsigned char hash)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
struct team_port *port;
port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
if (likely(port))
return port;
/* If no valid port in the table, fall back to simple hash */
- return lb_hash_select_tx_port(team, lb_priv, skb, hash);
+ return lb_hash_select_tx_port(team, hash);
}
struct lb_select_tx_port {
@@ -229,7 +224,7 @@ static bool lb_transmit(struct team *team, struct sk_buff *skb)
hash = lb_get_skb_hash(lb_priv, skb);
select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
- port = select_tx_port_func(team, lb_priv, skb, hash);
+ port = select_tx_port_func(team, hash);
if (unlikely(!port))
goto drop;
if (team_dev_queue_xmit(team, port, skb))
@@ -242,19 +237,18 @@ drop:
return false;
}
-static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
if (!lb_priv->ex->orig_fprog) {
ctx->data.bin_val.len = 0;
ctx->data.bin_val.ptr = NULL;
- return 0;
+ return;
}
ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
sizeof(struct sock_filter);
ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
- return 0;
}
static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
@@ -335,7 +329,7 @@ static void lb_bpf_func_free(struct team *team)
bpf_prog_destroy(fp);
}
-static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
lb_select_tx_port_func_t *func;
@@ -346,7 +340,6 @@ static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
name = lb_select_tx_port_get_name(func);
BUG_ON(!name);
ctx->data.str_val = name;
- return 0;
}
static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
@@ -361,18 +354,17 @@ static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
return 0;
}
-static int lb_tx_hash_to_port_mapping_init(struct team *team,
- struct team_option_inst_info *info)
+static void lb_tx_hash_to_port_mapping_init(struct team *team,
+ struct team_option_inst_info *info)
{
struct lb_priv *lb_priv = get_lb_priv(team);
unsigned char hash = info->array_index;
LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
- return 0;
}
-static int lb_tx_hash_to_port_mapping_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void lb_tx_hash_to_port_mapping_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
struct team_port *port;
@@ -380,7 +372,6 @@ static int lb_tx_hash_to_port_mapping_get(struct team *team,
port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
ctx->data.u32_val = port ? port->dev->ifindex : 0;
- return 0;
}
static int lb_tx_hash_to_port_mapping_set(struct team *team,
@@ -401,44 +392,40 @@ static int lb_tx_hash_to_port_mapping_set(struct team *team,
return -ENODEV;
}
-static int lb_hash_stats_init(struct team *team,
- struct team_option_inst_info *info)
+static void lb_hash_stats_init(struct team *team,
+ struct team_option_inst_info *info)
{
struct lb_priv *lb_priv = get_lb_priv(team);
unsigned char hash = info->array_index;
lb_priv->ex->stats.info[hash].opt_inst_info = info;
- return 0;
}
-static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
unsigned char hash = ctx->info->array_index;
ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
ctx->data.bin_val.len = sizeof(struct lb_stats);
- return 0;
}
-static int lb_port_stats_init(struct team *team,
- struct team_option_inst_info *info)
+static void lb_port_stats_init(struct team *team,
+ struct team_option_inst_info *info)
{
struct team_port *port = info->port;
struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
lb_port_priv->stats_info.opt_inst_info = info;
- return 0;
}
-static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+static void lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
ctx->data.bin_val.len = sizeof(struct lb_stats);
- return 0;
}
static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
@@ -531,13 +518,12 @@ static void lb_stats_refresh(struct work_struct *work)
mutex_unlock(&team->lock);
}
-static int lb_stats_refresh_interval_get(struct team *team,
- struct team_gsetter_ctx *ctx)
+static void lb_stats_refresh_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
- return 0;
}
static int lb_stats_refresh_interval_set(struct team *team,
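With lb_priv and the skb dropped from lb_select_tx_port_func_t, a selector now takes only the team pointer and the hash, and derives any per-mode state from the team when it needs it, as lb_htpm_select_tx_port() does above. A hypothetical conforming selector, sketched against the existing if_team.h helpers (not part of this patch):

static struct team_port *example_select_tx_port(struct team *team,
						unsigned char hash)
{
	/* per-mode state, if needed, comes from get_lb_priv(team)
	 * instead of an extra parameter
	 */
	int port_index = team_num_to_port_index(team, hash);

	return team_get_port_by_index_rcu(team, port_index);
}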
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 3ec63de97ae3..dd405d82c6ac 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 9138ef2fb2c8..bece4df7b8dd 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -580,7 +580,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
/* Use vector 0 by default. Consider adding code to choose least used
* vector.
*/
- err = mlx5_vector2eqn(mdev, 0, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
goto err_vec;
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index deed156e6165..c82c1f4fc588 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -1025,8 +1025,8 @@ static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
goto err_buff;
}
- vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+ err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err)
goto err_vec;
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
deleted file mode 100644
index 77d783f71527..000000000000
--- a/include/linux/fs_enet_pd.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Platform information definitions for the
- * universal Freescale Ethernet driver.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <[email protected]>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <[email protected]>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_ENET_PD_H
-#define FS_ENET_PD_H
-
-#include <linux/clk.h>
-#include <linux/string.h>
-#include <linux/of_mdio.h>
-#include <linux/if_ether.h>
-#include <asm/types.h>
-
-#define FS_ENET_NAME "fs_enet"
-
-enum fs_id {
- fsid_fec1,
- fsid_fec2,
- fsid_fcc1,
- fsid_fcc2,
- fsid_fcc3,
- fsid_scc1,
- fsid_scc2,
- fsid_scc3,
- fsid_scc4,
-};
-
-#define FS_MAX_INDEX 9
-
-static inline int fs_get_fec_index(enum fs_id id)
-{
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id - fsid_fec1;
- return -1;
-}
-
-static inline int fs_get_fcc_index(enum fs_id id)
-{
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id - fsid_fcc1;
- return -1;
-}
-
-static inline int fs_get_scc_index(enum fs_id id)
-{
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id - fsid_scc1;
- return -1;
-}
-
-static inline int fs_fec_index2id(int index)
-{
- int id = fsid_fec1 + index - 1;
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id;
- return FS_MAX_INDEX;
- }
-
-static inline int fs_fcc_index2id(int index)
-{
- int id = fsid_fcc1 + index - 1;
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id;
- return FS_MAX_INDEX;
-}
-
-static inline int fs_scc_index2id(int index)
-{
- int id = fsid_scc1 + index - 1;
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id;
- return FS_MAX_INDEX;
-}
-
-enum fs_mii_method {
- fsmii_fixed,
- fsmii_fec,
- fsmii_bitbang,
-};
-
-enum fs_ioport {
- fsiop_porta,
- fsiop_portb,
- fsiop_portc,
- fsiop_portd,
- fsiop_porte,
-};
-
-struct fs_mii_bit {
- u32 offset;
- u8 bit;
- u8 polarity;
-};
-struct fs_mii_bb_platform_info {
- struct fs_mii_bit mdio_dir;
- struct fs_mii_bit mdio_dat;
- struct fs_mii_bit mdc_dat;
- int delay; /* delay in us */
- int irq[32]; /* irqs per phy's */
-};
-
-struct fs_platform_info {
-
- void(*init_ioports)(struct fs_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
-
- u32 cp_page; /* CPM page */
- u32 cp_block; /* CPM sblock */
- u32 cp_command; /* CPM page/sblock/mcn */
-
- u32 clk_trx; /* some stuff for pins & mux configuration*/
- u32 clk_rx;
- u32 clk_tx;
- u32 clk_route;
- u32 clk_mask;
-
- u32 mem_offset;
- u32 dpram_offset;
- u32 fcc_regs_c;
-
- u32 device_flags;
-
- struct device_node *phy_node;
- const struct fs_mii_bus_info *bus_info;
-
- int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[ETH_ALEN]; /* mac address */
- int rx_copybreak; /* limit we copy small frames */
- int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
- int has_phy; /* if the network is phy container as well...*/
-
- struct clk *clk_per; /* 'per' clock for register access */
-};
-struct fs_mii_fec_platform_info {
- u32 irq[32];
- u32 mii_speed;
-};
-
-static inline int fs_get_id(struct fs_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SCC"))
- return fs_scc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FCC"))
- return fs_fcc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FEC"))
- return fs_fec_index2id(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 8de6b6e67829..1b9b15a492fa 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -162,8 +162,8 @@ struct team_option {
bool per_port;
unsigned int array_size; /* != 0 means the option is array */
enum team_option_type type;
- int (*init)(struct team *team, struct team_option_inst_info *info);
- int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ void (*init)(struct team *team, struct team_option_inst_info *info);
+ void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
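Under the updated struct team_option ops above, init and getter can no longer fail; only setter keeps its int return. A minimal sketch of an option definition against the new signatures (names and the stored value are illustrative, not from this patch):

static void example_opt_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	/* getters just fill in ctx; there is no error path any more */
	ctx->data.u32_val = team->notify_peers.count;
}

static int example_opt_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	/* setters can still fail, so they keep returning int */
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static const struct team_option example_option = {
	.name   = "example",
	.type   = TEAM_OPTION_TYPE_U32,
	.getter = example_opt_get,
	.setter = example_opt_set,
};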
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index fa70c25423b2..3e1017d764b7 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1058,7 +1058,7 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1108,9 +1108,8 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
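The rename pairs mlx5_comp_vectors_count()/mlx5_vector2eqn() with mlx5_comp_vectors_max()/mlx5_comp_eqn_get(), and callers convert mechanically, as the vfio hunk above shows. A sketch of the converted pattern, assuming mdev is a live struct mlx5_core_dev pointer:

	int eqn, err;
	u16 vector;

	vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	if (err)
		return err;
	/* eqn can now be written into the CQ context as before */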
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ba08b0e60279..b963ce22e7c7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1732,10 +1732,6 @@ int phy_start_cable_test_tdr(struct phy_device *phydev,
}
#endif
-int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
-int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
- u16 cm);
-
static inline void phy_device_reset(struct phy_device *phydev, int value)
{
mdio_device_reset(&phydev->mdio, value);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index a1a8e1b6e7df..f7fec0791acc 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1743,9 +1743,6 @@ int devl_resource_size_get(struct devlink *devlink,
int devl_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
-int devlink_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units);
void devl_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
diff --git a/include/net/fq.h b/include/net/fq.h
index 07b5aff6ec58..99fbe4127b95 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -98,9 +98,4 @@ typedef bool fq_skb_filter_t(struct fq *,
struct sk_buff *,
void *);
-typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
- struct fq_tin *,
- int idx,
- struct sk_buff *);
-
#endif
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 78df91804c87..94231533a369 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -8,23 +8,23 @@
/**
* DOC: page_pool allocator
*
- * This page_pool allocator is optimized for the XDP mode that
- * uses one-frame-per-page, but have fallbacks that act like the
+ * The page_pool allocator is optimized for the XDP mode that
+ * uses one frame per page, but it can fall back to the
* regular page allocator APIs.
*
- * Basic use involve replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should likely use
+ * Basic use involves replacing alloc_pages() calls with the
+ * page_pool_alloc_pages() call. Drivers should use
* page_pool_dev_alloc_pages() replacing dev_alloc_pages().
*
- * API keeps track of in-flight pages, in-order to let API user know
- * when it is safe to dealloactor page_pool object. Thus, API users
- * must call page_pool_put_page() where appropriate and only attach
- * the page to a page_pool-aware objects, like skbs marked for recycling.
+ * The API keeps track of in-flight pages, in order to let API users
+ * know when it is safe to free a page_pool object. Thus, API users
+ * must call page_pool_put_page() to free the page, or attach
+ * the page to a page_pool-aware object, like an skb marked with
+ * skb_mark_for_recycle().
*
- * API user must only call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of elevated refcnt, it
- * will release the DMA mapping and in-flight state accounting. We
- * hope to lift this requirement in the future.
+ * API users must call page_pool_put_page() only once per page, as it
+ * will either recycle the page or, in case of refcnt > 1, release
+ * the DMA mapping and in-flight state accounting.
*/
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H
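The documented flow boils down to allocating from the pool instead of the page allocator, then balancing every page with exactly one put (or an skb marked for recycling). A minimal usage sketch of that flow, not part of this patch, with error handling trimmed:

	struct page *page;

	/* replaces dev_alloc_pages() */
	page = page_pool_dev_alloc_pages(pool);
	if (!page)
		return -ENOMEM;

	/* either hand the page to an skb and let the stack recycle it... */
	skb_mark_for_recycle(skb);

	/* ...or, on an error path, return it to the pool ourselves */
	page_pool_put_full_page(pool, page, true);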
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 0294cfec9c37..a43062d4c734 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -326,10 +326,6 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info,
struct netlink_ext_ack *extack);
-void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining);
-
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
index 3bf42f5335ed..e7900d9fa205 100644
--- a/net/devlink/leftover.c
+++ b/net/devlink/leftover.c
@@ -6829,8 +6829,10 @@ int devl_port_register_with_ops(struct devlink *devlink,
spin_lock_init(&devlink_port->type_lock);
INIT_LIST_HEAD(&devlink_port->reporter_list);
err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL);
- if (err)
+ if (err) {
+ devlink_port->registered = false;
return err;
+ }
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 24015e11255f..37ab238e8304 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1690,10 +1690,14 @@ int dsa_port_phylink_create(struct dsa_port *dp)
ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
} else {
/* For legacy drivers */
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- dp->pl_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_GMII,
- dp->pl_config.supported_interfaces);
+ if (mode != PHY_INTERFACE_MODE_NA) {
+ __set_bit(mode, dp->pl_config.supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ dp->pl_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ dp->pl_config.supported_interfaces);
+ }
}
pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6ba1a0fafbaa..f28c87533a46 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -236,7 +236,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
- return -EINVAL;
+ return PTR_ERR(neigh);
}
static int ip_finish_output_gso(struct net *net, struct sock *sk,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index f4bfccae003c..4952ae792450 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -648,7 +648,6 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct inet6_dev *idev = __in6_dev_get(skb->dev);
struct inet6_skb_parm *opt = IP6CB(skb);
struct in6_addr *addr = NULL;
- struct in6_addr daddr;
int n, i;
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
@@ -796,9 +795,7 @@ looped_back:
return -1;
}
- daddr = *addr;
- *addr = ipv6_hdr(skb)->daddr;
- ipv6_hdr(skb)->daddr = daddr;
+ swap(*addr, ipv6_hdr(skb)->daddr);
ip6_route_input(skb);
if (skb_dst(skb)->error) {
diff --git a/net/tipc/link.h b/net/tipc/link.h
index a16f401fdabd..d80f5649b395 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -148,8 +148,6 @@ int tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, u16 gap,
struct tipc_gap_ack_blks *ga,
struct sk_buff_head *xmitq,
struct sk_buff_head *retrq);
-void tipc_link_build_bc_sync_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq);
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
struct sk_buff_head *xmitq);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 9c1f13541708..5c122d7bb784 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2240,7 +2240,6 @@ int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
tlm = tls_msg(skb);
} else {
struct tls_decrypt_arg darg;
- int to_decrypt;
err = tls_rx_rec_wait(sk, NULL, true, released);
if (err <= 0)
@@ -2248,20 +2247,18 @@ int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
memset(&darg.inargs, 0, sizeof(darg.inargs));
- rxm = strp_msg(tls_strp_msg(ctx));
- tlm = tls_msg(tls_strp_msg(ctx));
-
- to_decrypt = rxm->full_len - prot->overhead_size;
-
err = tls_rx_one_record(sk, NULL, &darg);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto read_sock_end;
}
- released = tls_read_flush_backlog(sk, prot, rxm->full_len, to_decrypt,
- decrypted, &flushed_at);
+ released = tls_read_flush_backlog(sk, prot, INT_MAX,
+ 0, decrypted,
+ &flushed_at);
skb = darg.skb;
+ rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
decrypted += rxm->full_len;
tls_rx_rec_done(ctx);
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index e64311331726..5f39d2490655 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -1871,6 +1871,7 @@ def print_req_policy(cw, struct, ri=None):
for _, arg in struct.member_list():
arg.attr_policy(cw)
cw.p("};")
+ cw.nl()
def kernel_can_gen_family_struct(family):
@@ -2000,9 +2001,10 @@ def print_kernel_op_table(family, cw):
continue
dont_validate.append(x)
- members.append(('validate',
- ' | '.join([c_upper('genl-dont-validate-' + x)
- for x in dont_validate])), )
+ if dont_validate:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in dont_validate])), )
name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
if 'pre' in op[op_mode]:
members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))