Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig  1
-rw-r--r--  drivers/net/ethernet/Makefile  1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h  1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h  35
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c  6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c  96
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c  41
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h  41
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h  45
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c  99
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h  7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c  4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h  6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c  5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h  4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c  349
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h  31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c  375
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h  31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c  1326
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h  544
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h  1521
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c  82
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h  62
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  754
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  87
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c  32
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h  11936
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  221
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c  30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h  6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  7
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig  13
-rw-r--r--  drivers/net/ethernet/cavium/Makefile  1
-rw-r--r--  drivers/net/ethernet/cavium/common/Makefile  1
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c  353
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h  70
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h  36
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c  56
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h  1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  29
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  169
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  26
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  29
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile  1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c  24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c  280
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h  8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c  81
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h  56
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c  65
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c  113
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  546
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  218
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  106
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h  164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h  27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c  23
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c  4
-rw-r--r--  drivers/net/ethernet/cortina/Kconfig  22
-rw-r--r--  drivers/net/ethernet/cortina/Makefile  4
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c  2593
-rw-r--r--  drivers/net/ethernet/cortina/gemini.h  958
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c  16
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h  1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  152
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h  26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c  259
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  50
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  43
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c  14
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c  9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h  10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c  27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h  41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h  28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c  29
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h  10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h  28
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h  4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c  196
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  48
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c  3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  121
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h  6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c  22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c  5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c  72
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  516
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  63
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h  39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c  19
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c  341
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c  27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  280
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c  184
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c  92
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c  105
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h  37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  473
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  85
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c  302
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c  44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c  558
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c  453
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/fw.h  103
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c  416
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c  100
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h  140
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c  183
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c  85
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c  47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c  14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h  67
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c  88
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h  9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c  12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c  17
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h  15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  98
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c  135
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h  210
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c  19
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c  76
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c  81
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.h  16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c  6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c  42
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c  38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c  4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c  8
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  25
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  43
-rw-r--r--  drivers/net/ethernet/sfc/efx.h  4
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c  6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c  158
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  2
-rw-r--r--  drivers/net/ethernet/socionext/Kconfig  12
-rw-r--r--  drivers/net/ethernet/socionext/Makefile  1
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c  1777
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c  113
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c  2
206 files changed, 22352 insertions, 11427 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d50519ed7549..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 6cf5aded9423..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e222a234..f79da4b5900b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,7 +16,6 @@
#include <linux/pci.h>
#include "ver.h"
-#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index b3825de6cdfb..5d67f1335f4d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
* version 2, as published by the Free Software Foundation.
*/
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
* functions.
*/
@@ -15,6 +15,8 @@
#define AQ_HW_H
#include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
/* NIC H/W capabilities */
struct aq_hw_caps_s {
@@ -86,13 +88,33 @@ struct aq_stats_s {
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
struct aq_hw_s {
- struct aq_obj_s header;
+ atomic_t flags;
struct aq_nic_cfg_s *aq_nic_cfg;
struct aq_pci_func_s *aq_pci_func;
void __iomem *mmio;
unsigned int not_ff_addr;
struct aq_hw_link_status_s aq_link_status;
+ struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
@@ -102,7 +124,7 @@ struct sk_buff;
struct aq_hw_ops {
struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
- unsigned int port, struct aq_hw_ops *ops);
+ unsigned int port);
void (*destroy)(struct aq_hw_s *self);
@@ -124,7 +146,6 @@ struct aq_hw_ops {
struct aq_ring_s *aq_ring);
int (*hw_get_mac_permanent)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
@@ -135,8 +156,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -184,7 +204,8 @@ struct aq_hw_ops {
struct aq_rss_parameters *rss_params);
int (*hw_get_regs)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
int (*hw_update_stats)(struct aq_hw_s *self);
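
[Note: the aq_hw_ops changes above constify the per-revision ops table and drop redundant parameters. A minimal, self-contained C sketch of the dispatch pattern this moves toward follows; all names and the device ID in it are illustrative, not taken from the driver.]

#include <stdio.h>

struct hw;

struct hw_ops {
	int (*hw_init)(struct hw *self, unsigned char *mac_addr);
	int (*hw_start)(struct hw *self);
};

struct hw { const struct hw_ops *ops; };

static int a0_init(struct hw *self, unsigned char *mac) { (void)self; (void)mac; return 0; }
static int a0_start(struct hw *self) { (void)self; return 0; }

/* one const table per hardware revision, selected at probe time */
static const struct hw_ops a0_ops = { .hw_init = a0_init, .hw_start = a0_start };

static const struct hw_ops *get_ops_by_id(unsigned int device_id)
{
	return device_id == 0x0001 ? &a0_ops : NULL; /* hypothetical ID */
}

int main(void)
{
	struct hw hw = { .ops = get_ops_by_id(0x0001) };
	unsigned char mac[6] = { 0 };

	if (hw.ops && hw.ops->hw_init(&hw, mac) == 0)
		hw.ops->hw_start(&hw);
	return 0;
}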
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465995f6..27e250d61da7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -40,7 +40,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
u32 value = readl(hw->mmio + reg);
if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
- aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
@@ -54,11 +54,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
err = -EIO;
goto err_exit;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d86775..887bc846375a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,37 +13,32 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
-static const struct pci_device_id aq_pci_tbl[] = {
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
{
- struct aq_hw_ops *ops = NULL;
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *aq_nic = NULL;
- ops = hw_atl_a0_get_ops_by_id(pdev);
- if (!ops)
- ops = hw_atl_b0_get_ops_by_id(pdev);
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ if (!ndev)
+ return NULL;
- return ops;
+ aq_nic = netdev_priv(ndev);
+ aq_nic->ndev = ndev;
+ ndev->netdev_ops = &aq_ndev_ops;
+ ndev->ethtool_ops = &aq_ethtool_ops;
+
+ return ndev;
}
static int aq_ndev_open(struct net_device *ndev)
@@ -170,66 +165,3 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
-
-static int aq_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- struct aq_hw_ops *aq_hw_ops = NULL;
- struct aq_pci_func_s *aq_pci_func = NULL;
- int err = 0;
-
- err = pci_enable_device(pdev);
- if (err < 0)
- goto err_exit;
- aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
- aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
- &aq_ndev_ops, &aq_ethtool_ops);
- if (!aq_pci_func) {
- err = -ENOMEM;
- goto err_exit;
- }
- err = aq_pci_func_init(aq_pci_func);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- if (err < 0) {
- if (aq_pci_func)
- aq_pci_func_free(aq_pci_func);
- }
- return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- aq_pci_func_deinit(aq_pci_func);
- aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
- .name = AQ_CFG_DRV_NAME,
- .id_table = aq_pci_tbl,
- .probe = aq_pci_probe,
- .remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e575e0..ce92152eb43e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
#include "aq_common.h"
+struct net_device *aq_ndev_alloc(void);
+
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 75a894a9251c..d98251371ee4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
-#include "aq_nic_internal.h"
+#include "aq_main.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -150,9 +150,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
+ aq_utils_obj_set(&self->flags,
AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
+ aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
@@ -160,7 +160,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
@@ -171,7 +171,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = aq_nic_update_link_status(self);
@@ -205,14 +205,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
-static struct net_device *aq_nic_ndev_alloc(void)
-{
- return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
+struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops)
@@ -221,7 +214,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
struct aq_nic_s *self = NULL;
int err = 0;
- ndev = aq_nic_ndev_alloc();
+ ndev = aq_ndev_alloc();
if (!ndev) {
err = -ENOMEM;
goto err_exit;
@@ -229,9 +222,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self = netdev_priv(ndev);
- ndev->netdev_ops = ndev_ops;
- ndev->ethtool_ops = et_ops;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->if_port = port;
@@ -242,8 +232,9 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self->aq_hw_ops = *aq_hw_ops;
self->port = (u8)port;
- self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
- &self->aq_hw_ops);
+ self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port);
+ self->aq_hw->aq_nic_cfg = &self->aq_nic_cfg;
+
err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
pdev->device, pdev->subsystem_device);
if (err < 0)
@@ -268,7 +259,6 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
- self->aq_nic_cfg.aq_hw_caps,
self->ndev->dev_addr);
if (err < 0)
goto err_exit;
@@ -295,7 +285,7 @@ err_exit:
int aq_nic_ndev_init(struct aq_nic_s *self)
{
- struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
@@ -366,11 +356,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
self->aq_ring_tx[idx] = ring;
}
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
- return self->ndev->dev.parent;
-}
-
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
return self->ndev;
@@ -387,7 +372,7 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
+ err = self->aq_hw_ops.hw_init(self->aq_hw,
aq_nic_get_ndev(self)->dev_addr);
if (err < 0)
goto err_exit;
@@ -992,7 +977,7 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self)
if (!self)
goto err_exit;
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ for (i = ARRAY_SIZE(self->aq_vec); i--;) {
if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
self->aq_vec[i] = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 3c9f8db03d5f..1cd7d728e91b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,10 +14,13 @@
#include "aq_common.h"
#include "aq_rss.h"
+#include "aq_hw.h"
struct aq_ring_s;
struct aq_pci_func_s;
struct aq_hw_ops;
+struct aq_fw_s;
+struct aq_vec_s;
#define AQ_NIC_FC_OFF 0U
#define AQ_NIC_FC_TX 1U
@@ -33,7 +36,7 @@ struct aq_hw_ops;
#define AQ_NIC_RATE_100M BIT(5)
struct aq_nic_cfg_s {
- struct aq_hw_caps_s *aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
@@ -44,7 +47,6 @@ struct aq_nic_cfg_s {
u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
- u32 ucp_0x364;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
@@ -69,9 +71,38 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
+struct aq_nic_s {
+ atomic_t flags;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ struct aq_pci_func_s *aq_pci_func;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ struct aq_hw_ops aq_hw_ops;
+ struct aq_hw_caps_s aq_hw_caps;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+
+ struct pci_dev *pdev;
+ unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711dc165..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
- struct aq_obj_s header;
- struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
- struct aq_hw_s *aq_hw;
- struct net_device *ndev;
- struct aq_pci_func_s *aq_pci_func;
- unsigned int aq_vecs;
- unsigned int packet_filter;
- unsigned int power_state;
- u8 port;
- struct aq_hw_ops aq_hw_ops;
- struct aq_hw_caps_s aq_hw_caps;
- struct aq_nic_cfg_s aq_nic_cfg;
- struct timer_list service_timer;
- struct timer_list polling_timer;
- struct aq_hw_link_status_s link_status;
- struct {
- u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
- } mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
- AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
- AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
- AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 58c29d04b186..78ef7d2deffe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,11 +9,15 @@
/* File aq_pci_func.c: Definition of PCI functions. */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
#include "aq_pci_func.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
-#include <linux/interrupt.h>
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
struct aq_pci_func_s {
struct pci_dev *pdev;
@@ -29,10 +33,30 @@ struct aq_pci_func_s {
struct aq_hw_caps_s aq_hw_caps;
};
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops)
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+static const struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+{
+ const struct aq_hw_ops *ops = NULL;
+
+ ops = hw_atl_a0_get_ops_by_id(pdev);
+ if (!ops)
+ ops = hw_atl_b0_get_ops_by_id(pdev);
+
+ return ops;
+}
+
+struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *aq_hw_ops,
+ struct pci_dev *pdev)
{
struct aq_pci_func_s *self = NULL;
int err = 0;
@@ -59,8 +83,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
self->ports = self->aq_hw_caps.ports;
for (port = 0; port < self->ports; ++port) {
- struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- pdev, self,
+ struct aq_nic_s *aq_nic = aq_nic_alloc_cold(pdev, self,
port, aq_hw_ops);
if (!aq_nic) {
@@ -297,3 +320,65 @@ int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
err_exit:
return err;
}
+
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ const struct aq_hw_ops *aq_hw_ops = NULL;
+ struct aq_pci_func_s *aq_pci_func = NULL;
+ int err = 0;
+
+ err = pci_enable_device(pdev);
+ if (err < 0)
+ goto err_exit;
+ aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
+ aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev);
+ if (!aq_pci_func) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ err = aq_pci_func_init(aq_pci_func);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ if (err < 0) {
+ if (aq_pci_func)
+ aq_pci_func_free(aq_pci_func);
+ }
+ return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ aq_pci_func_deinit(aq_pci_func);
+ aq_pci_func_free(aq_pci_func);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
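
[Note: the probe/remove/suspend/resume callbacks above are moved verbatim from aq_main.c; module_pci_driver() is the standard kernel helper macro that replaces hand-written init/exit boilerplate. It expands to roughly the following (sketch of the generic macro, not driver code):]

static int __init aq_pci_ops_init(void)
{
	return pci_register_driver(&aq_pci_ops);
}
module_init(aq_pci_ops_init);

static void __exit aq_pci_ops_exit(void)
{
	pci_unregister_driver(&aq_pci_ops);
}
module_exit(aq_pci_ops_exit);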
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb033791203..5f100ea1b0d6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,11 +13,10 @@
#define AQ_PCI_FUNC_H
#include "aq_common.h"
+#include "aq_nic.h"
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops);
+struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *hw_ops,
+ struct pci_dev *pdev);
int aq_pci_func_init(struct aq_pci_func_s *self);
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
char *name, void *aq_vec,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca6534b85..0be6a11370bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_record_rx_queue(skb, self->idx);
- napi_gro_receive(napi, skb);
-
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
+
+ napi_gro_receive(napi, skb);
}
err_exit:
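
[Note: the aq_ring.c hunk above reorders the receive path so the statistics are updated before the skb is handed to GRO. Once napi_gro_receive() is called, ownership of the skb passes to the network stack, which may merge or free it, so reading skb->len afterwards risks a use-after-free. The resulting ordering:]

/* update stats while the skb is still ours */
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;

/* ownership passes to GRO; skb must not be touched after this */
napi_gro_receive(napi, skb);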
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 5844078764bd..965fae0fb6e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
#include "aq_common.h"
struct page;
+struct aq_nic_cfg_s;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
};
struct aq_ring_s {
- struct aq_obj_s header;
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdfb874a..786ea8187c69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
#include "aq_common.h"
-#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
- atomic_t flags;
-};
-
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
unsigned long flags_old, flags_new;
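
[Note: with the aq_obj_s wrapper gone, the flag helpers above take an atomic_t directly. The helper body is truncated in this hunk; a user-space analogue of the lock-free set/test pattern it implements, using C11 atomics in place of the kernel's atomic_read()/atomic_cmpxchg() retry loop, might look like this (a sketch, not the driver code):]

#include <stdatomic.h>
#include <stdint.h>

/* OR a mask into a shared flags word via compare-and-swap */
static inline void obj_set(atomic_uint *flags, uint32_t mask)
{
	unsigned int old = atomic_load(flags);

	/* retry until no other CPU changed the word under us */
	while (!atomic_compare_exchange_weak(flags, &old, old | mask))
		;
}

static inline int obj_test(atomic_uint *flags, uint32_t mask)
{
	return !!(atomic_load(flags) & mask);
}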
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a099ef..f890b8a5a862 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
#include <linux/netdevice.h>
struct aq_vec_s {
- struct aq_obj_s header;
- struct aq_hw_ops *aq_hw_ops;
+ const struct aq_hw_ops *aq_hw_ops;
struct aq_hw_s *aq_hw;
struct aq_nic_s *aq_nic;
unsigned int tx_rings;
@@ -166,7 +165,7 @@ err_exit:
return self;
}
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw)
{
struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b184236c..8bdf60bb3f63 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
struct aq_hw_s;
struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f18dce14c93c..4a1c1b96b8b6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,6 +12,7 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -36,21 +37,20 @@ static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
}
static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
+ unsigned int port)
{
- struct hw_atl_s *self = NULL;
+ struct aq_hw_s *self = NULL;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self)
goto err_exit;
- self->base.aq_pci_func = aq_pci_func;
+ self->aq_pci_func = aq_pci_func;
- self->base.not_ff_addr = 0x10U;
+ self->not_ff_addr = 0x10U;
err_exit:
- return (struct aq_hw_s *)self;
+ return self;
}
static void hw_atl_a0_destroy(struct aq_hw_s *self)
@@ -62,24 +62,24 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
+ hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
@@ -99,51 +99,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_A0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -151,20 +153,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -193,11 +194,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -212,35 +214,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
/* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -251,38 +253,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -301,10 +303,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
err = aq_hw_err_from_flags(self);
@@ -312,9 +314,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -325,10 +325,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
int err = 0;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
@@ -337,8 +334,8 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
- reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
- reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_a0_hw_qos_set(self);
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
@@ -353,19 +350,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
- [(aq_nic_cfg->vecs > 1U) ?
- 1 : 0]);
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
- ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
- ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
@@ -376,28 +372,28 @@ err_exit:
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -483,36 +479,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw,
+ aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -524,25 +521,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -563,7 +560,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -572,13 +569,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- ring->hw_head = hw_head_;
+ ring->hw_head = hw_head;
err = aq_hw_err_from_flags(self);
err_exit:
@@ -602,15 +599,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
if ((1U << 4) &
- reg_rx_dma_desc_status_get(self, ring->idx)) {
- rdm_rx_desc_en_set(self, false, ring->idx);
- rdm_rx_desc_res_set(self, true, ring->idx);
- rdm_rx_desc_res_set(self, false, ring->idx);
- rdm_rx_desc_en_set(self, true, ring->idx);
+ hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+ hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
- (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ (hw_atl_rdm_rx_desc_head_ptr_get(self,
+ ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -693,26 +691,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
-
- if ((1U << 16) & reg_gen_irq_status_get(self))
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+ if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -723,18 +720,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -761,17 +760,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
@@ -823,7 +824,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, itr_rx, i);
+ hw_atl_reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -837,14 +838,14 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -860,7 +861,7 @@ err_exit:
return err;
}
-static struct aq_hw_ops hw_atl_ops_ = {
+static const struct aq_hw_ops hw_atl_ops_ = {
.create = hw_atl_a0_create,
.destroy = hw_atl_a0_destroy,
.get_hw_caps = hw_atl_a0_get_hw_caps,
@@ -903,7 +904,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
+const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
{
bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527954c9..4fdd51b67097 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -29,6 +29,6 @@
#endif
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
#endif /* HW_ATL_A0_H */
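
Returning a const ops table lets hw_atl_ops_ live in read-only data and makes clear that callers only dispatch through it. The caller side needs no change beyond the pointer type; a hedged sketch of the lookup (the real probe path in aq_pci_func/aq_main may order things differently):

	const struct aq_hw_ops *aq_hw_ops;

	aq_hw_ops = hw_atl_a0_get_ops_by_id(pdev);
	if (!aq_hw_ops)
		aq_hw_ops = hw_atl_b0_get_ops_by_id(pdev);
	if (!aq_hw_ops)
		return -ENODEV;		/* not an A0/B0 part */

Each getter checks the PCI vendor and device IDs (and distinguishes A0 from B0 silicon) before returning its static table or NULL.
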
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a0330cf0..7a71330252bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,37 +88,6 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
/* HW layer capabilities */
static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
.ports = 1U,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index e4a22ce7bf09..0b090161ed79 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,6 +12,7 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -37,21 +38,20 @@ static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
}
static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
+ unsigned int port)
{
- struct hw_atl_s *self = NULL;
+ struct aq_hw_s *self = NULL;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self)
goto err_exit;
- self->base.aq_pci_func = aq_pci_func;
+ self->aq_pci_func = aq_pci_func;
- self->base.not_ff_addr = 0x10U;
+ self->not_ff_addr = 0x10U;
err_exit:
- return (struct aq_hw_s *)self;
+ return self;
}
static void hw_atl_b0_destroy(struct aq_hw_s *self)
@@ -63,24 +63,24 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
+ hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
@@ -100,51 +100,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -152,20 +154,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
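
The bare `if (err < 0)` right after each AQ_HW_WAIT_FOR() reads oddly until you know the macro assigns err itself. A sketch of the helper from aq_hw.h, reconstructed from memory (exact expansion may differ):

	/* poll until _B_ holds, busy-waiting _US_ microseconds between
	 * tries, at most _N_ times; sets the caller's local 'err' to
	 * -ETIME on timeout
	 */
	#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
	do { \
		unsigned int AQ_HW_WAIT_FOR_i; \
		for (AQ_HW_WAIT_FOR_i = _N_; \
		     (!(_B_)) && AQ_HW_WAIT_FOR_i; \
		     --AQ_HW_WAIT_FOR_i) \
			udelay(_US_); \
		if (!AQ_HW_WAIT_FOR_i) \
			err = -ETIME; \
	} while (0)

So the RSS key write-enable bit above is polled every 1000 us, at most 10 times, matching the "check 10 times by 1ms" comments in the reset path.
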
@@ -194,11 +195,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
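
For context on the bitary[] loop: the RSS redirection table is a list of 3-bit queue indices packed back to back into u16 words, which is why the shift amount above is ((i * 3U) & 0xFU). The packing step just before this hunk looks roughly like this (names assumed from the visible fragment):

	u16 bitary[HW_ATL_B0_RSS_REDIRECTION_MAX * 3 / 16];

	memset(bitary, 0, sizeof(bitary));
	for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;)
		*(u32 *)(bitary + ((i * 3U) / 16U)) |=
			rss_params->indirection_table[i] << ((i * 3U) & 0xFU);

The OR through a u32 pointer lets an entry straddle a 16-bit word boundary. Each packed word is then pushed out with the same write-enable/poll handshake used for the hash key; AQ_DIMOF, the driver's private ARRAY_SIZE clone, is replaced by the kernel macro in the process.
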
@@ -215,15 +217,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
unsigned int i;
/* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* LRO offloads */
{
@@ -232,43 +234,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
- rpo_lro_max_num_of_descriptors_set(self, val, i);
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
- rpo_lro_time_base_divider_set(self, 0x61AU);
- rpo_lro_inactive_interval_set(self, 0);
- rpo_lro_max_coalescing_interval_set(self, 2);
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
- rpo_lro_qsessions_lim_set(self, 1U);
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
- rpo_lro_total_desc_lim_set(self, 2U);
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
- rpo_lro_patch_optimization_en_set(self, 0U);
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
- rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
- rpo_lro_pkt_lim_set(self, 1U);
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
- rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_rpo_lro_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -279,55 +282,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
- rpf_vlan_flr_act_set(self, 1U, 0U);
- rpf_vlan_id_flr_set(self, 0U, 0U);
- rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+ hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
- rpf_vlan_accept_untagged_packets_set(self, 1U);
- rpf_vlan_untagged_act_set(self, 1U);
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
- rpf_vlan_flr_act_set(self, 1U, 1U);
- rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- rpf_vlan_flr_en_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -346,10 +349,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
err = aq_hw_err_from_flags(self);
@@ -357,9 +360,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -371,10 +372,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
int err = 0;
u32 val;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_B0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
@@ -388,14 +386,15 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
/* Force limit MRRS on RDM/TDM to 2K */
- val = aq_hw_read_reg(self, pci_reg_control6_adr);
- aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
/* TX DMA total request limit. B0 hardware is not capable to
* handle more than (8K-MRRS) incoming DMA data.
* Value 24 in 256byte units
*/
- aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
@@ -406,16 +405,16 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
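
The two MRRS-related writes above encode a single constraint: (val & ~0x707) | 0x404 pins both 3-bit MRRS fields in CONTROL6 to 4, i.e. 128 << 4 = 2048 bytes, and the TX DMA request limit then follows as (8192 - 2048) / 256 = 24, matching the "Value 24 in 256byte units" comment.
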
@@ -427,28 +426,28 @@ err_exit:
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -534,36 +533,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -575,25 +574,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -614,7 +613,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -623,9 +622,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
@@ -728,22 +727,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -754,20 +753,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
@@ -796,16 +795,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -824,10 +823,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO:
- tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
- tdm_tdm_intr_moder_en_set(self, 1U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
- rdm_rdm_intr_moder_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */
@@ -887,18 +886,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
break;
case AQ_CFG_INTERRUPT_MODERATION_OFF:
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
- tdm_tdm_intr_moder_en_set(self, 0U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
- rdm_rdm_intr_moder_en_set(self, 0U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U;
itr_rx = 0U;
break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
@@ -913,14 +912,14 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -936,7 +935,7 @@ err_exit:
return err;
}
-static struct aq_hw_ops hw_atl_ops_ = {
+static const struct aq_hw_ops hw_atl_ops_ = {
.create = hw_atl_b0_create,
.destroy = hw_atl_b0_destroy,
.get_hw_caps = hw_atl_b0_get_hw_caps,
@@ -979,7 +978,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
+const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
{
bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6c1f3..3e10969c1df5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -29,6 +29,6 @@
#endif
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
#endif /* HW_ATL_B0_H */
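
The hw_atl_b0_internal.h hunk below mirrors the hw_atl_a0_internal.h one earlier: both headers carried byte-for-byte identical hw_atl_txd_s, hw_atl_txc_s, hw_atl_rxd_s and hw_atl_rxd_wb_s definitions, and the patch deduplicates them into one shared copy elsewhere in the driver. Since the removed copies match field for field, descriptor layout is unchanged; code such as the rx_receive paths keeps casting ring memory to struct hw_atl_rxd_wb_s * exactly as before.
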
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6edfca2..740ff73c6d67 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,37 +142,6 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
/* HW layer capabilities */
static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
.ports = 1U,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651afa8c7..10ba035dadb1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
#include "../aq_hw_utils.h"
/* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
{
- aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
}
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
- return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
}
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
- glb_reg_res_dis_msk,
- glb_reg_res_dis_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
glb_reg_res_dis);
}
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
- aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
- glb_soft_res_shift, soft_res);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
}
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
- glb_soft_res_msk,
- glb_soft_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
}
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
}
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
}
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
}
/* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
{
- aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
}
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
{
/* register address for bitfield imr_rx{r}_en */
static u32 itr_imr_rxren_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}_en */
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
irq_map_en_rx);
}
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
{
/* register address for bitfield imr_tx{t}_en */
static u32 itr_imr_txten_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
irq_map_en_tx);
}
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_msk[32] = {
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
};
/* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
irq_map_rx);
}
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_msk[32] = {
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
};
/* lower bit position of bitfield imr_tx{t}[4:0] */
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
irq_map_tx);
}
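
A note on the four address/mask tables in these interrupt-mapping helpers: each 32-bit IMR register at 0x2100..0x213C serves an even/odd ring pair, which is why consecutive table entries repeat the same address while the masks alternate between halves of the register. One register therefore carries two 5-bit rx map fields (bits 12:8 and 4:0) and two 5-bit tx map fields (bits 28:24 and 20:16), with the _en tables following the same pairing. For example, ring 5 maps through 0x00002108 with its rx vector in bits 4:0 and its tx vector in bits 20:16.
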
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
}
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
- aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
}
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
- itr_reg_res_dsbl_msk,
- itr_reg_res_dsbl_shift, irq_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
}
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
}
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
}
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
}
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
- aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift, res_irq);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
}
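
All of the *_set wrappers in this file bottom out in the same read-modify-write primitive from aq_hw_utils.c, so the renames change call sites only. A minimal sketch of that primitive, assuming the signature visible at the call sites (the real body also special-cases a full-width mask):

	void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
				 u32 shift, u32 val)
	{
		u32 reg_old, reg_new;

		reg_old = aq_hw_read_reg(aq_hw, addr);
		reg_new = (reg_old & ~msk) | (val << shift);

		if (reg_old != reg_new)
			aq_hw_write_reg(aq_hw, addr, reg_new);
	}

The _get wrappers are the inverse: read the register, mask, then shift right.
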
/* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
- rdm_dcadcpuid_msk,
- rdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
}
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
- rdm_dca_en_shift, rx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
}
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
- rdm_dca_mode_shift, rx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
}
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
- rdm_descddata_size_msk,
- rdm_descddata_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
rx_desc_data_buff_size);
}
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
- rdm_dcaddesc_en_msk,
- rdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
rx_desc_dca_en);
}
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
- rdm_descden_msk,
- rdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
rx_desc_en);
}
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
- rdm_descdhdr_size_msk,
- rdm_descdhdr_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
rx_desc_head_buff_size);
}
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
- rdm_descdhdr_split_msk,
- rdm_descdhdr_split_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
rx_desc_head_splitting);
}
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
- rdm_descdhd_msk, rdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
}
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
- rdm_descdlen_msk, rdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
rx_desc_len);
}
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
- rdm_descdreset_msk, rdm_descdreset_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
rx_desc_res);
}
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
- rdm_int_desc_wrb_en_msk,
- rdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
rx_desc_wr_wb_irq_en);
}
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
- rdm_dcadhdr_en_msk,
- rdm_dcadhdr_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
rx_head_dca_en);
}
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
- rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
rx_pld_dca_en);
}
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
- rdm_int_rim_en_msk,
- rdm_int_rim_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
rdm_intr_moder_en);
}
/* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
{
- aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
}
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
}
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
- aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
}
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
- aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
}
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
rx_dma_desc_base_addrlsw);
}
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
rx_dma_desc_base_addrmsw);
}
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
}
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
rx_dma_desc_tail_ptr);
}
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
}
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
}
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
{
- aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
}
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
{
- aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
}
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
rx_intr_moderation_ctl);
}
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
{
- aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
}
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
tx_dma_desc_base_addrlsw);
}
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
tx_dma_desc_base_addrmsw);
}
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
tx_dma_desc_tail_ptr);
}
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
- rpb_dma_sys_lbk_msk,
- rpb_dma_sys_lbk_shift, dma_sys_lbk);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
- rpb_rpf_rx_tc_mode_msk,
- rpb_rpf_rx_tc_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
rx_traf_class_mode);
}
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
- rpb_rx_buf_en_shift, rx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
- rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
rx_buff_hi_threshold_per_tc);
}
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
- rpb_rxblo_thresh_msk,
- rpb_rxblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
rx_buff_lo_threshold_per_tc);
}
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
- rpb_rx_fc_mode_msk,
- rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
- rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
rx_pkt_buff_size_per_tc);
}
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
- rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
rx_xoff_en_per_tc);
}
/* rpf */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
- rpfl2bc_thresh_msk,
- rpfl2bc_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
l2broadcast_count_threshold);
}
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
- rpfl2bc_en_shift, l2broadcast_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
}
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
- rpfl2bc_act_shift, l2broadcast_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
}
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
- rpfl2mc_enf_msk,
- rpfl2mc_enf_shift, l2multicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
- rpfl2promis_mode_msk,
- rpfl2promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
l2promiscuous_mode_en);
}
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
- rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
l2unicast_flr_act);
}
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
- rpfl2uc_enf_msk,
- rpfl2uc_enf_shift, l2unicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
}
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
l2unicast_dest_addresslsw);
}
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
- rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
l2unicast_dest_addressmsw);
}
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
- rpfl2mc_accept_all_msk,
- rpfl2mc_accept_all_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
l2_accept_all_mc_packets);
}
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
};
/* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
user_priority_tc_map);
}
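All eight entries of rpf_rpb_rx_tc_upt_adr name the same register because the eight rx_tc_up{t} fields are packed into one 32-bit word; the companion mask and shift arrays elided from this hunk pick out one field per index. A sketch of the layout being assumed here (3-bit fields on 4-bit strides is an inference from the bitfield name, not taken from the patch):

static const u32 upt_msk_sketch[8] = {
	0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
	0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
};
static const u32 upt_shft_sketch[8] = { 0, 4, 8, 12, 16, 20, 24, 28 };

/* the setter then reduces to:
 *   aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
 *                       upt_msk_sketch[tc], upt_shft_sketch[tc],
 *                       user_priority_tc_map);
 */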
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
- rpf_rss_key_addr_msk,
- rpf_rss_key_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
rss_key_addr);
}
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
- aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
rss_key_wr_data);
}
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
}
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
rss_key_wr_en);
}
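The four rss_key accessors above implement a small indirect-write handshake: load the data word, select the key address, raise the write-enable strobe, then poll until hardware clears it. A hedged sketch of the caller side (the polling budget and udelay() interval are assumptions, not the driver's tuned timeout; real code needs <linux/delay.h> and <linux/errno.h>):

static int rss_key_word_write_sketch(struct aq_hw_s *hw, u32 key_addr,
				     u32 data)
{
	int budget = 100;	/* assumed polling budget */

	hw_atl_rpf_rss_key_wr_data_set(hw, data);
	hw_atl_rpf_rss_key_addr_set(hw, key_addr);
	hw_atl_rpf_rss_key_wr_en_set(hw, 1U);

	/* hardware drops the enable bit once the write has landed */
	while (hw_atl_rpf_rss_key_wr_en_get(hw) && --budget)
		udelay(10);

	return budget ? 0 : -ETIME;
}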
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
- rpf_rss_redir_addr_msk,
- rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
}
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
- rpf_rss_redir_wr_data_msk,
- rpf_rss_redir_wr_data_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
rss_redir_tbl_wr_data);
}
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
}
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
}
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
- rpf_tpo_rpf_sys_lbk_msk,
- rpf_tpo_rpf_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
tpo_to_rpf_sys_lbk);
}
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
- rpf_vl_inner_tpid_msk,
- rpf_vl_inner_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
vlan_inner_etht);
}
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
- rpf_vl_outer_tpid_msk,
- rpf_vl_outer_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
vlan_outer_etht);
}
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
- rpf_vl_promis_mode_msk,
- rpf_vl_promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
vlan_prom_mode_en);
}
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
- rpf_vl_accept_untagged_mode_msk,
- rpf_vl_accept_untagged_mode_shift,
- vlan_accept_untagged_packets);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
}
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
- rpf_vl_untagged_act_msk,
- rpf_vl_untagged_act_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
vlan_untagged_act);
}
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
- rpf_vl_en_f_msk,
- rpf_vl_en_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
vlan_flr_en);
}
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
- rpf_vl_act_f_msk,
- rpf_vl_act_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
vlan_flr_act);
}
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
- rpf_vl_id_f_msk,
- rpf_vl_id_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
vlan_id_flr);
}
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
- rpf_et_enf_msk,
- rpf_et_enf_shift, etht_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
- rpf_et_upfen_msk, rpf_et_upfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
- rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
etht_rx_queue_en);
}
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
- rpf_et_upf_msk,
- rpf_et_upf_shift, etht_user_priority);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
- rpf_et_rxqf_msk,
- rpf_et_rxqf_shift, etht_rx_queue);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
- rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
etht_mgt_queue);
}
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
- rpf_et_actf_msk,
- rpf_et_actf_shift, etht_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
- rpf_et_valf_msk,
- rpf_et_valf_shift, etht_flr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
/* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
- rpo_ipv4chk_en_msk,
- rpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
- rpo_descdvl_strip_msk,
- rpo_descdvl_strip_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
rx_desc_vlan_stripping);
}
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
- rpol4chk_en_shift, tcp_udp_crc_offload_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
}
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
- aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
}
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
- rpo_lro_ptopt_en_msk,
- rpo_lro_ptopt_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
lro_patch_optimization_en);
}
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
- u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
- rpo_lro_qses_lmt_msk,
- rpo_lro_qses_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
lro_qsessions_lim);
}
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
- rpo_lro_tot_dsc_lmt_msk,
- rpo_lro_tot_dsc_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
lro_total_desc_lim);
}
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
- rpo_lro_pkt_min_msk,
- rpo_lro_pkt_min_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
lro_min_pld_of_first_pkt);
}
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
- aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
}
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_number_of_descriptors,
- u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
{
/* Register address for bitfield lro{L}_des_max[1:0] */
static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
lro_max_number_of_descriptors);
}
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
- rpo_lro_tb_div_msk,
- rpo_lro_tb_div_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
lro_time_base_divider);
}
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
- rpo_lro_ina_ival_msk,
- rpo_lro_ina_ival_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
lro_inactive_interval);
}
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
- rpo_lro_max_ival_msk,
- rpo_lro_max_ival_shift,
- lro_max_coalescing_interval);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
}
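Taken together, the LRO setters size the coalescing machinery: per-session and total descriptor budgets, the time base that the interval fields count in, and a per-queue enable mask. Illustrative usage with placeholder values (the numbers below are examples, not the driver's tuned defaults, and the one-bit-per-queue reading of the enable register is an inference):

static void lro_configure_sketch(struct aq_hw_s *hw)
{
	u32 i;

	for (i = 0U; i < 32U; i++)	/* example per-session descriptor cap */
		hw_atl_rpo_lro_max_num_of_descriptors_set(hw, 3U, i);

	hw_atl_rpo_lro_time_base_divider_set(hw, 0x61aU);	/* example */
	hw_atl_rpo_lro_inactive_interval_set(hw, 0U);
	hw_atl_rpo_lro_max_coalescing_interval_set(hw, 2U);
	hw_atl_rpo_lro_qsessions_lim_set(hw, 1U);
	hw_atl_rpo_lro_total_desc_lim_set(hw, 2U);
	hw_atl_rpo_lro_min_pay_of_first_pkt_set(hw, 10U);
	hw_atl_rpo_lro_pkt_lim_set(hw, 1U);
	hw_atl_rpo_lro_en_set(hw, 0xffffffffU);	/* presumably one bit per queue */
}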
/* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
- rx_reg_res_dsbl_msk,
- rx_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
rx_reg_res_dis);
}
/* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
- tdm_dcadcpuid_msk,
- tdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
}
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
{
- aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
}
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
- tdm_dca_en_shift, tx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
}
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
- tdm_dca_mode_shift, tx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
}
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
- tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
tx_desc_dca_en);
}
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
- tdm_descden_msk,
- tdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
tx_desc_en);
}
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
- tdm_descdhd_msk, tdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
}
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
- tdm_descdlen_msk,
- tdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
tx_desc_len);
}
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
- tdm_int_desc_wrb_en_msk,
- tdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
tx_desc_wr_wb_irq_en);
}
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
- tdm_descdwrb_thresh_msk,
- tdm_descdwrb_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
tx_desc_wr_wb_threshold);
}
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
- tdm_int_mod_en_msk,
- tdm_int_mod_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
tdm_irq_moderation_en);
}
/* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
- thm_lso_tcp_flag_first_msk,
- thm_lso_tcp_flag_first_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
lso_tcp_flag_of_first_pkt);
}
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
- thm_lso_tcp_flag_last_msk,
- thm_lso_tcp_flag_last_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
lso_tcp_flag_of_last_pkt);
}
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
- thm_lso_tcp_flag_mid_msk,
- thm_lso_tcp_flag_mid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
- tpb_tx_buf_en_shift, tx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
- tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
tx_buff_hi_threshold_per_tc);
}
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
- tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
tx_buff_lo_threshold_per_tc);
}
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
- tpb_dma_sys_lbk_msk,
- tpb_dma_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
tx_dma_sys_lbk_en);
}
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
- tpb_txbbuf_size_msk,
- tpb_txbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
tx_pkt_buff_size_per_tc);
}
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
- tpb_tx_scp_ins_en_msk,
- tpb_tx_scp_ins_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
- tpo_ipv4chk_en_msk,
- tpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
- tpol4chk_en_msk,
- tpol4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
tcp_udp_crc_offload_en);
}
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
- tpo_pkt_sys_lbk_msk,
- tpo_pkt_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
- tps_data_tc_arb_mode_msk,
- tps_data_tc_arb_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
tx_pkt_shed_data_arb_mode);
}
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
- tps_desc_rate_ta_rst_msk,
- tps_desc_rate_ta_rst_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
curr_time_res);
}
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
- tps_desc_rate_lim_msk,
- tps_desc_rate_lim_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
tx_pkt_shed_desc_rate_lim);
}
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
- tps_desc_tc_arb_mode_msk,
- tps_desc_tc_arb_mode_shift,
- tx_pkt_shed_desc_tc_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
- tps_desc_tctcredit_max_msk,
- tps_desc_tctcredit_max_shift,
- tx_pkt_shed_desc_tc_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
- tps_desc_tctweight_msk,
- tps_desc_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
}
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
- tps_desc_vm_arb_mode_msk,
- tps_desc_vm_arb_mode_shift,
- tx_pkt_shed_desc_vm_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
- tps_data_tctcredit_max_msk,
- tps_data_tctcredit_max_shift,
- tx_pkt_shed_tc_data_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
- tps_data_tctweight_msk,
- tps_data_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
}
/* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
- tx_reg_res_dsbl_msk,
- tx_reg_res_dsbl_shift, tx_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
}
/* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
- msm_reg_access_busy_msk,
- msm_reg_access_busy_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
}
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
- msm_reg_addr_msk,
- msm_reg_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
reg_addr_for_indirect_addr);
}
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
- msm_reg_rd_strobe_msk,
- msm_reg_rd_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
reg_rd_strobe);
}
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
}
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
- aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
}
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
- msm_reg_wr_strobe_msk,
- msm_reg_wr_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
reg_wr_strobe);
}
/* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
- pci_reg_res_dsbl_msk,
- pci_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
pci_reg_res_dis);
}
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
- u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
{
- aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
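The MSM block is reachable only indirectly: software loads the target address, pulses the read strobe, waits for the busy flag to clear, then fetches the data word. A sketch of that sequence built from the accessors above (the polling budget is an assumption, and real callers would also take the appropriate hardware semaphore first):

static int msm_read_sketch(struct aq_hw_s *hw, u32 msm_addr, u32 *val)
{
	int budget = 100;	/* assumed polling budget */

	hw_atl_msm_reg_addr_for_indirect_addr_set(hw, msm_addr);
	hw_atl_msm_reg_rd_strobe_set(hw, 1U);

	/* the busy flag stays set until the MSM completes the access */
	while (hw_atl_msm_reg_access_status_get(hw) && --budget)
		udelay(10);

	if (!budget)
		return -ETIME;

	*val = hw_atl_msm_reg_rd_data_get(hw);
	return 0;
}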
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b95adb..dfb426f2dc2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,681 @@ struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
- u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
/* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
/* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
/* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
/* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
/* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
/* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
/* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
/* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
/* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
/* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
/* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
- u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
/* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
- u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr,
- u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
/* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
- u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
/* rpb */
/* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
/* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
/* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
/* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
/* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
/* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
/* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
/* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
/* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
- u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
/* set ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter);
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
/* set ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter);
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
/* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
/* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
/* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping,
- u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
/* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
/* Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
/* Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
/* rx */
/* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
/* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
/* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
/* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
/* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
/* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */
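
Every prototype renamed above follows the same low-level-hardware (llh) accessor pattern: a setter masks its value into one bitfield of one register, and a getter extracts one, pairing the HW_ATL_*_ADR/_MSK/_SHIFT macros renamed in hw_atl_llh_internal.h below with the aq_hw_write_reg_bit()/aq_hw_read_reg_bit() helpers declared in aq_hw_utils.h. A minimal sketch of that pattern, for illustration only — the actual function bodies live in hw_atl_llh.c:

/* Sketch of the llh accessor pattern behind the renamed prototypes.
 * Plain registers use a fixed _ADR macro; per-instance registers
 * (descriptor, filter, dca, buffer, ...) take the index via _ADR(x).
 */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
	/* read-modify-write one bitfield: (reg & ~MSK) | (val << SHIFT) */
	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
			    HW_ATL_RPB_RX_BUF_EN_MSK,
			    HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}

u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
	/* extract one bitfield: (reg & MSK) >> SHIFT */
	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
				  HW_ATL_RDM_DESCDHD_MSK,
				  HW_ATL_RDM_DESCDHD_SHIFT);
}

The accessors only need the _ADR/_MSK/_SHIFT triple; the _MSKN (inverted mask), _WIDTH, and _DEFAULT companions defined below document each bitfield's layout and reset value.
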
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 93450ec930e8..e0cf70120f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
*/
-#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
/* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
/* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
/* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
/* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
/* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
/* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
/* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
/* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
/* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
/* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
/* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
/* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
/* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
/* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
/* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
/* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
/* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
/* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
/* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
/* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
/* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
/* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
/* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
/* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
/* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
/* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
/* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
/* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
/* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
/* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* rx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
*/
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
/* rx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
/* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
/* rx desc{d}_data_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
*/
/* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
/* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
/* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
/* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
/* rx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
/* rx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
/* rx desc{d}_hdr_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
*/
/* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
/* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
/* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
/* rx desc{d}_hdr_split bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
*/
/* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
/* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
/* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
/* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
/* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
/* rx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
/* rx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
/* rx desc{d}_reset bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
*/
/* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
/* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
/* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
/* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
/* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
/* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
/* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
/* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* rx dca{d}_hdr_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
*/
/* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
/* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
/* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
/* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
/* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
/* rx dca{d}_pay_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
*/
/* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
/* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
/* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
/* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
/* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
/* RX rdm_int_rim_en Bitfield Definitions
* Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
*/
/* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
/* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
/* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
/* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
/* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
/* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
/* general interrupt mapping register definitions
* preprocessor definitions for general interrupt mapping register
* base address: 0x00002180
* parameter: regidx {f} | stride size 0x4 | range [0, 3]
*/
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
/* general interrupt status register definitions
* preprocessor definitions for general interrupt status register
* address: 0x000021A0
*/
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
/* interrupt global control register definitions
* preprocessor definitions for interrupt global control register
* address: 0x00002300
*/
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
/* interrupt throttle register definitions
* preprocessor definitions for interrupt throttle register
* base address: 0x00002800
* parameter: throttle {t} | stride size 0x4 | range [0, 31]
*/
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
/* rx dma descriptor base address lsw definitions
* preprocessor definitions for rx dma descriptor base address lsw
* base address: 0x00005b00
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00005b00u + (descriptor) * 0x20)
/* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
* base address: 0x00005b04
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00005b04u + (descriptor) * 0x20)
/* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
* base address: 0x00005b14
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
/* rx dma descriptor tail pointer register definitions
* preprocessor definitions for rx dma descriptor tail pointer register
* base address: 0x00005b10
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
/* rx interrupt moderation control register definitions
* Preprocessor definitions for RX Interrupt Moderation Control Register
* Base Address: 0x00005A40
* Parameter: RIM {R} | stride size 0x4 | range [0, 31]
*/
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
/* rx filter multicast filter mask register definitions
* preprocessor definitions for rx filter multicast filter mask register
* address: 0x00005270
*/
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
/* rx filter multicast filter register definitions
* preprocessor definitions for rx filter multicast filter register
* base address: 0x00005250
* parameter: filter {f} | stride size 0x4 | range [0, 7]
*/
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
/* RX Filter RSS Control Register 1 Definitions
* Preprocessor definitions for RX Filter RSS Control Register 1
* Address: 0x000054C0
*/
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
/* RX Filter Control Register 2 Definitions
* Preprocessor definitions for RX Filter Control Register 2
* Address: 0x00005104
*/
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
/* tx tx dma debug control [1f:0] bitfield definitions
* preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
*/
/* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
/* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
/* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
/* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
/* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
/* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
/* tx dma descriptor base address lsw definitions
* preprocessor definitions for tx dma descriptor base address lsw
* base address: 0x00007c00
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00007c00u + (descriptor) * 0x40)
/* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
* base address: 0x00007c10
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
/* rx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
*/
/* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
/* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
/* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
/* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
/* rx rx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
*/
/* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
/* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
/* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
/* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
/* rx rx{b}_hi_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
*/
/* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
/* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
/* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
/* rx rx{b}_lo_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
*/
/* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
/* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
/* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
/* rx rx_fc_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
*/
/* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
/* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
/* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
/* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
/* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
/* rx rx{b}_buf_size[8:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
*/
/* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
/* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
/* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
/* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
/* rx rx{b}_xoff_en bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
*/
/* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
/* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
/* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
/* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
/* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
/* rx l2_bc_thresh[f:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
*/
/* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
/* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
/* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
/* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
/* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
/* rx l2_bc_en bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
*/
/* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
/* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
/* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
/* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
/* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
/* rx l2_bc_act[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
*/
/* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
/* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
/* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
/* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
/* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
/* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
/* rx l2_mc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
*/
/* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
/* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
/* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
/* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
/* rx l2_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
*/
/* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
/* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
/* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
/* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
/* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
/* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
/* rx l2_uc_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
*/
/* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
/* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
/* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
/* rx l2_uc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
*/
/* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
/* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
/* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
/* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
/* rx l2_mc_accept_all bitfield definitions
* Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
*/
/* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
/* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
/* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
/* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
/* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
/* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
/* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
/* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
/* rx rss_key_addr[4:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
*/
/* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
/* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
/* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
/* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
/* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
/* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
/* rx rss_key_wr_data[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
*/
/* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
/* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
/* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
/* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
/* rx rss_key_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
*/
/* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
/* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
/* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
/* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
/* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
/* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
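/* Editorial aside: the three rss_key_* bitfields above form a small
 * indirect-write port -- a 32-bit key word at 0x54d4 plus a 5-bit word
 * address and a write strobe sharing 0x54d0.  A hedged sketch of the
 * programming sequence, using the field helpers from the earlier aside
 * and hypothetical flat MMIO accessors (the driver's real helpers take
 * an aq_hw_s context):
 */
u32 reg_read(u32 addr);			/* hypothetical MMIO read */
void reg_write(u32 addr, u32 val);	/* hypothetical MMIO write */

static void rss_key_word_write(u32 word_addr, u32 key_word)
{
	u32 ctl;

	reg_write(HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, key_word);

	ctl = reg_read(HW_ATL_RPF_RSS_KEY_ADDR_ADR);
	ctl = hw_atl_field_set(ctl, HW_ATL_RPF_RSS_KEY_ADDR_MSK,
			       HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, word_addr);
	ctl = hw_atl_field_set(ctl, HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
			       HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, 1U);
	reg_write(HW_ATL_RPF_RSS_KEY_ADDR_ADR, ctl);

	/* The driver then polls the strobe until hardware reports the
	 * word as latched (the bit reads back as zero).
	 */
	while (hw_atl_field_get(reg_read(HW_ATL_RPF_RSS_KEY_WR_ENI_ADR),
				HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
				HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT))
		;
}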
/* rx rss_redir_addr[3:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
*/
/* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
/* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
/* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
/* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
/* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
/* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
/* rx rss_redir_wr_data[f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
*/
/* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
/* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
/* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
/* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
/* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
/* rx rss_redir_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
*/
/* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
/* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
/* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
/* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
/* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
/* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
/* rx tpo_rpf_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
*/
/* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
/* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
/* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
/* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
/* rx vl_inner_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
*/
/* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
/* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
/* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
/* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
/* rx vl_outer_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
*/
/* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
/* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
/* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
/* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
/* rx vl_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
*/
/* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
/* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
/* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
/* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
/* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
/* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
/* RX vl_accept_untagged_mode Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
*/
/* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
/* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
/* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
/* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
/* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
/* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
/* RX vl_untagged_act[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
*/
/* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
/* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
/* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
/* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
/* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
/* RX vl_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
*/
/* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
/* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
/* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
/* RX vl_act{F}[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
*/
/* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
/* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
/* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
/* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
/* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
/* RX vl_id{F}[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
*/
/* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
/* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
/* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
/* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
/* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
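/* Editorial aside: vl_en{F}, vl_act{F} and vl_id{F} all live in the same
 * per-filter register (0x00005290 + filter * 0x4), so one VLAN filter
 * entry is naturally programmed as a single composed write.  A hedged
 * sketch, reusing the hypothetical helpers from the earlier asides:
 */
static void vlan_filter_write(u32 filter, u32 en, u32 act, u32 vlan_id)
{
	u32 reg = 0U;

	reg = hw_atl_field_set(reg, HW_ATL_RPF_VL_EN_F_MSK,
			       HW_ATL_RPF_VL_EN_F_SHIFT, en);
	reg = hw_atl_field_set(reg, HW_ATL_RPF_VL_ACT_F_MSK,
			       HW_ATL_RPF_VL_ACT_F_SHIFT, act);
	reg = hw_atl_field_set(reg, HW_ATL_RPF_VL_ID_F_MSK,
			       HW_ATL_RPF_VL_ID_F_SHIFT, vlan_id);
	reg_write(HW_ATL_RPF_VL_EN_F_ADR(filter), reg);
}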
/* RX et_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
*/
/* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
/* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
/* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
/* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
*/
/* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
/* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
/* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
/* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
/* rx et_up{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
*/
/* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
/* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
/* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
/* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
/* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
/* rx et_rxq{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
*/
/* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
/* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
/* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
/* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
/* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
/* rx et_up{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
*/
/* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
/* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
/* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
/* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
/* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
/* rx et_rxq{f}[4:0] bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
*/
/* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
/* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
/* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
/* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
/* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
/* rx et_mng_rxq{f} bitfield definitions
* preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
*/
/* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
/* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
/* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
/* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
/* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
/* rx et_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
*/
/* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
/* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
/* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
/* rx et_val{f}[f:0] bitfield definitions
* preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
*/
/* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
/* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
/* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
/* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
/* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
/* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
/* rx desc{d}_vl_strip bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
*/
/* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
/* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
/* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
/* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
/* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
/* rx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
/* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
/* rx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
/* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
/* tx dca{d}_cpuid[7:0] bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
*/
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
/* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
/* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
/* tx lso_en[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
*/
/* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
/* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
/* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
/* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
/* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
/* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
/* tx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
*/
/* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
/* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
/* tx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
/* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
/* tx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
/* tx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
/* tx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
/* tx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
/* tx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
/* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
/* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
/* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
*/
/* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
/* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
/* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
/* tx lso_tcp_flag_first[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
*/
/* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
/* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
/* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
/* tx lso_tcp_flag_last[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
*/
/* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
/* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
/* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
/* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
/* tx lso_tcp_flag_mid[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
@@ -1538,17 +1546,17 @@
*/
/* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
/* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
/* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
/* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
/* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
/* RX lro_en[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
*/
/* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
/* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
/* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
/* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
/* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
/* RX lro_ptopt_en Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
*/
/* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
/* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
/* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
/* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
/* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
/* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFAULT 0x1
/* RX lro_q_ses_lmt Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
*/
/* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
/* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
/* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
/* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
/* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
*/
/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
/* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFAULT 0x1
/* RX lro_pkt_min[4:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
*/
/* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
/* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
/* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
/* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
/* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
/* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
/* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
/* RX lro_tb_div[11:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
*/
/* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
/* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
/* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
/* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
/* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
/* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
/* RX lro_ina_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
*/
/* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
/* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
/* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
/* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
/* RX lro_max_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
*/
/* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
/* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
/* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
/* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
/* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
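/* Editorial aside: lro_tb_div, lro_ina_ival and lro_max_ival pack into a
 * single LRO timer register at 0x00005620, so the three fields can be
 * composed and written together.  A hedged sketch that programs the
 * documented defaults, again reusing the hypothetical helpers above:
 */
static void lro_timers_write_defaults(void)
{
	u32 reg = 0U;

	reg = hw_atl_field_set(reg, HW_ATL_RPO_LRO_TB_DIV_MSK,
			       HW_ATL_RPO_LRO_TB_DIV_SHIFT,
			       HW_ATL_RPO_LRO_TB_DIV_DEFAULT);
	reg = hw_atl_field_set(reg, HW_ATL_RPO_LRO_INA_IVAL_MSK,
			       HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
			       HW_ATL_RPO_LRO_INA_IVAL_DEFAULT);
	reg = hw_atl_field_set(reg, HW_ATL_RPO_LRO_MAX_IVAL_MSK,
			       HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
			       HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT);
	reg_write(HW_ATL_RPO_LRO_TB_DIV_ADR, reg);
}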
/* TX dca{D}_cpuid[7:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
*/
/* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
/* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
/* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
/* TX dca{D}_desc_en Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
*/
/* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
/* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
/* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
/* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
/* TX desc{D}_en Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
*/
/* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
/* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
/* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
/* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
/* TX desc{D}_hd[C:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
*/
/* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
/* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
/* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
/* TX desc{D}_len[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
*/
/* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
/* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
/* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
/* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
/* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
*/
/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
(0x00007C18 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
/* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
/* TX tdm_int_mod_en Bitfield Definitions
* Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
*/
/* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
/* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
/* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
/* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
/* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
/* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
* PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
*/
/* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
/* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
/* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
*/
/* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
/* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
/* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
/* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
*/
/* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
/* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
/* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
/* tx tx{b}_lo_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
*/
/* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
/* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
/* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
/* tx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
*/
/* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
/* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
/* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
/* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
/* tx tx_scp_ins_en bitfield definitions
* preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
*/
/* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
/* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
/* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
/* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
/* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
/* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
/* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
/* tx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
/* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
/* tx pkt_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
*/
/* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
/* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
/* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
/* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
/* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
*/
/* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
/* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
*/
/* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
/* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
/* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
/* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
/* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
/* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
/* tx desc_rate_limit[a:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
*/
/* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
/* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
/* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
/* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
/* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
/* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
/* tx desc_tc_arb_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
*/
/* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
/* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
/* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
/* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
*/
/* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
/* tx desc_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
*/
/* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
/* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
/* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
/* tx desc_vm_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
*/
/* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
/* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
/* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
/* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
/* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
/* mac_phy register access busy bitfield definitions
* preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
*/
/* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
/* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
/* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
/* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
/* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
/* mac_phy msm register address[7:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
*/
/* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
/* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
/* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
/* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
/* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
/* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
/* mac_phy register read strobe bitfield definitions
* preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
*/
/* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
/* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
/* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
/* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
/* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
/* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
/* mac_phy msm register read data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
*/
/* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
/* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
/* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
/* mac_phy msm register write data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
*/
/* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
/* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
/* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
/* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
/* mac_phy register write strobe bitfield definitions
* preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
*/
/* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
/* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
/* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
/* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
/* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
/* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
*/
/* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
/* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
/* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
/* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
/* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
/* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
/* mif register reset disable bitfield definitions
* preprocessor definitions for the bitfield "register reset disable".
@@ -2324,27 +2332,27 @@
*/
/* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
/* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
/* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
/* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
/* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
/* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
/* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
/* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
/* tx dma total request limit */
-#define tx_dma_total_req_limit_adr 0x00007b20u
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
@@ -2352,7 +2360,7 @@
* Parameter: queue {Q} | stride size 0x4 | range [0, 31]
*/
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
/* pcie reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2360,22 +2368,23 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
/* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
/* PCI core control register */
-#define pci_reg_control6_adr 0x1014u
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
/* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
#endif /* HW_ATL_LLH_INTERNAL_H */
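Every bitfield in this header is described by the same macro family — _ADR (register address), _MSK/_MSKN (mask and its inverse), _SHIFT, _WIDTH and, where present, _DEFAULT — and the rename above only moves that family into the HW_ATL_ namespace; no values change. A minimal sketch of how such a set is consumed, assuming the driver's aq_hw_read_reg()/aq_hw_write_reg() accessors seen elsewhere in this patch (the driver's real helper is aq_hw_write_reg_bit(); this simplified form is illustrative only):

/* Read-modify-write one bitfield described by an ADR/MSK/SHIFT triplet. */
static inline void hw_atl_rmw_bitfield(struct aq_hw_s *self, u32 adr,
				       u32 msk, u32 shift, u32 val)
{
	u32 reg = aq_hw_read_reg(self, adr);

	reg = (reg & ~msk) | ((val << shift) & msk);
	aq_hw_write_reg(self, adr, reg);
}

/* Usage, e.g. enabling the TX packet buffer:
 *	hw_atl_rmw_bitfield(self, HW_ATL_TPB_TX_BUF_EN_ADR,
 *			    HW_ATL_TPB_TX_BUF_EN_MSK,
 *			    HW_ATL_TPB_TX_BUF_EN_SHIFT, 1U);
 */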
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f2ce12ed4218..9c7e9161b4db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,11 +11,9 @@
* abstraction layer.
*/
-#include "../aq_hw.h"
+#include "../aq_nic.h"
#include "../aq_hw_utils.h"
#include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -37,15 +35,15 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
{
int err = 0;
- AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
- HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -66,7 +64,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
*(p++) = aq_hw_read_reg(self, 0x0000020CU);
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -78,7 +76,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
int err = 0;
bool is_locked;
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -97,7 +95,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
}
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -119,7 +117,7 @@ err_exit:
}
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ const struct aq_hw_caps_s *aq_hw_caps)
{
int err = 0;
@@ -133,10 +131,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
@@ -174,14 +172,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
- (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
- sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
sw.len = (u16)rpc_size;
aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
@@ -199,7 +197,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
- PHAL_ATLANTIC->rpc_tid = sw.tid;
+ self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
(fw.val =
@@ -221,9 +219,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (fw.len) {
err =
hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->rpc_addr,
+ self->rpc_addr,
(u32 *)(void *)
- &PHAL_ATLANTIC->rpc,
+ &self->rpc,
(fw.len + sizeof(u32) -
sizeof(u8)) /
sizeof(u32));
@@ -231,19 +229,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
- *rpc = &PHAL_ATLANTIC->rpc;
+ *rpc = &self->rpc;
}
err_exit:
return err;
}
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
int err = 0;
- err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
if (err < 0)
goto err_exit;
@@ -259,7 +256,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
}
@@ -270,7 +267,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
int err = 0;
err = hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
if (err < 0)
@@ -281,9 +278,9 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
- pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
}
err_exit:;
@@ -365,7 +362,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
}
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac)
{
int err = 0;
@@ -376,9 +372,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
+ &self->chip_features);
- err = hw_atl_utils_mpi_create(self, aq_hw_caps);
+ err = hw_atl_utils_mpi_create(self);
if (err < 0)
goto err_exit;
@@ -396,7 +392,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
- AQ_DIMOF(mac_addr));
+ ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -465,7 +461,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
- u32 val = reg_glb_mif_id_get(self);
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
if ((3U & mif_rev) == 1U) {
@@ -500,13 +496,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
- mbox.stats._N_ - hw_self->last_stats._N_)
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+ mbox.stats._N_ - self->last_stats._N_)
+
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
AQ_SDELTA(mprc);
@@ -527,19 +523,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
- hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
- memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- return &PHAL_ATLANTIC->curr_stats;
+ return &self->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -568,7 +564,7 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
};
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff)
{
unsigned int i = 0U;
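The renamed semaphore helpers above always appear in one fixed pattern: firmware RAM window I/O is bracketed by a global CPU semaphore, acquired by polling and released by writing 1 back. A hedged sketch of that pattern with the window I/O elided (AQ_HW_WAIT_FOR() is the driver's polling macro and assigns the local err, exactly as in the real functions):

static int hw_atl_fw_ram_op_sketch(struct aq_hw_s *self)
{
	int err = 0;

	/* Poll every 1 us, up to 10000 times, until the semaphore is free. */
	AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
						  HW_ATL_FW_SM_RAM) == 1U,
		       1U, 10000U);
	if (err < 0)
		return err;

	/* ... window reads/writes against the mailbox registers here ... */

	/* Release the semaphore for the firmware and other callers. */
	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
	return err;
}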
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 21aeca6908d3..40e2319c65d5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
-#include "../aq_common.h"
-
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
struct hw_atl_stats_s stats;
};
-struct __packed hw_atl_s {
- struct aq_hw_s base;
- struct hw_atl_stats_s last_stats;
- struct aq_stats_s curr_stats;
- u64 speed;
- unsigned int chip_features;
- u32 fw_ver_actual;
- atomic_t dpc;
- u32 mbox_addr;
- u32 rpc_addr;
- u32 rpc_tid;
- struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- PHAL_ATLANTIC->chip_features)
+ self->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
@@ -171,6 +180,10 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+struct aq_hw_s;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -189,13 +202,12 @@ int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
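The descriptor structs moved into this header mirror fixed 16-byte hardware DMA layouts, which is why each is declared __packed. Not part of the patch, but a compile-time guard one could add alongside them (BUILD_BUG_ON() is from <linux/bug.h>):

static inline void hw_atl_check_desc_layouts(void)
{
	/* Each descriptor must be exactly 16 bytes on the wire. */
	BUILD_BUG_ON(sizeof(struct hw_atl_txd_s) != 16);
	BUILD_BUG_ON(sizeof(struct hw_atl_txc_s) != 16);
	BUILD_BUG_ON(sizeof(struct hw_atl_rxd_s) != 16);
	BUILD_BUG_ON(sizeof(struct hw_atl_rxd_wb_s) != 16);
}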
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
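The new bnxt_dim.o object wires the driver into the generic net_dim interrupt-moderation library. Its work callback is not part of this excerpt; a sketch of the expected shape, assuming net_dim_get_profile() from <linux/net_dim.h> as introduced alongside this series, plus the bnxt_hwrm_set_ring_coal() helper added further down in this patch:

/* Assumes <linux/net_dim.h> and "bnxt.h"; name is illustrative. */
void bnxt_dim_work_sketch(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bnxt_cp_ring_info *cpr = container_of(dim,
						     struct bnxt_cp_ring_info,
						     dim);
	struct bnxt_napi *bnapi = container_of(cpr,
					       struct bnxt_napi, cp_ring);
	struct net_dim_cq_moder mod = net_dim_get_profile(dim->mode,
							  dim->profile_ix);

	/* Apply the moderation profile net_dim selected to this ring. */
	cpr->rx_ring_coal.coal_ticks = mod.usec;
	cpr->rx_ring_coal.coal_bufs = mod.pkts;

	bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
	dim->state = NET_DIM_START_MEASURE;
}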
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 89c3c8760a78..6b7e99675571 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
+ BCM5745x_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -147,6 +148,7 @@ static const struct {
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
HWRM_FUNC_CFG,
+ HWRM_FUNC_VF_CFG,
HWRM_PORT_PHY_QCFG,
HWRM_CFA_L2_FILTER_ALLOC,
};
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = 1;
}
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
}
cons = rxcmp->rx_cmp_opaque;
@@ -1644,7 +1649,10 @@ next_rx:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);
-next_rx_no_prod:
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
@@ -1802,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
+ cpr->event_ctr++;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
@@ -2025,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
+ if (bp->flags & BNXT_FLAG_DIM) {
+ struct net_dim_sample dim_sample;
+
+ net_dim_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
mmiowb();
return work_done;
}
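The BNXT_FLAG_DIM test above is what gates sampling; the flag itself is driven from ethtool's adaptive-rx setting (the bnxt_ethtool.c hunk is outside this excerpt). A simplified, hedged sketch of that toggle — the real handler also restores the static bp->rx_coal parameters when adaptive mode is switched off:

static void bnxt_set_adaptive_rx_sketch(struct bnxt *bp, bool adaptive)
{
	if (adaptive)
		bp->flags |= BNXT_FLAG_DIM;	/* poll loop starts sampling */
	else
		bp->flags &= ~BNXT_FLAG_DIM;	/* fall back to static coalescing */
}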
@@ -2617,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
}
}
@@ -4483,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return -EIO;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ u16 cp, stats;
+
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ cp = min_t(u16, cp, stats);
+ hw_resc->resv_cp_rings = cp;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
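The large hunk that follows replaces the old tx-ring-only reservation with a full resource handshake: the driver asks firmware for tx/rx rings, ring groups, completion rings and vnics, then trusts only what firmware reports back, since under BNXT_FLAG_NEW_RM the granted counts may be lower than the ask. In sketch form, using the helpers this hunk introduces:

static int bnxt_reserve_flow_sketch(struct bnxt *bp, int tx, int rx,
				    int grp, int cp, int vnic)
{
	int rc;

	/* PF and VF issue different HWRM messages for the same request. */
	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
	if (rc)
		return rc;

	/* Both reserve helpers end with bnxt_hwrm_get_rings(), so
	 * bp->hw_resc.resv_* now hold the granted counts; callers must
	 * re-trim their ring counts to them, as __bnxt_reserve_rings()
	 * does below.
	 */
	return bp->hw_resc.resv_tx_rings ? 0 : -ENOMEM;
}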
@@ -4502,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_cfg_input req = {0};
+ u32 enables = 0;
int rc;
- if (bp->hwrm_spec_code < 0x10601)
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+ }
+ if (!enables)
return 0;
- if (BNXT_VF(bp))
+ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ bp->hw_resc.resv_tx_rings = tx_rings;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 enables = 0;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
- req.num_tx_rings = cpu_to_le16(*tx_rings);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+ int cp, int vnic)
+{
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ else
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int tx = bp->tx_nr_rings;
+ int rx = bp->rx_nr_rings;
+ int cp = bp->cp_nr_rings;
+ int grp, rx_rings, rc;
+ bool sh = false;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+
+ grp = bp->rx_nr_rings;
+ if (tx == hw_resc->resv_tx_rings &&
+ (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+ (rx == hw_resc->resv_rx_rings &&
+ grp == hw_resc->resv_hw_ring_grps &&
+ cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+ return 0;
+
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ if (rc)
return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
- mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc)
- bp->tx_reserved_rings = *tx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ rx = hw_resc->resv_rx_rings;
+ cp = hw_resc->resv_cp_rings;
+ grp = hw_resc->resv_hw_ring_grps;
+ vnic = hw_resc->resv_vnics;
+ }
+
+ rx_rings = rx;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (rx >= 2) {
+ rx_rings = rx >> 1;
+ } else {
+ if (netif_running(bp->dev))
+ return -ENOMEM;
+
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
+ }
+ rx_rings = min_t(int, rx_rings, grp);
+ rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx = rx_rings << 1;
+ cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ bp->tx_nr_rings = tx;
+ bp->rx_nr_rings = rx_rings;
+ bp->cp_nr_rings = cp;
+
+ if (!tx || !rx || !cp || !grp || !vnic)
+ return -ENOMEM;
+
return rc;
}
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
- struct hwrm_func_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rx = bp->rx_nr_rings;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return false;
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ return true;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (hw_resc->resv_rx_rings != rx ||
+ hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+ hw_resc->resv_vnics != vnic))
+ return true;
+ return false;
+}
+
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 flags, enables;
int rc;
- if (bp->hwrm_spec_code < 0x10801)
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
return 0;
- if (BNXT_VF(bp))
- return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ u32 flags, enables;
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+ enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ }
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+ ring_grps, cp_rings);
+
+ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings);
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
@@ -4593,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = cpu_to_le16(flags);
}
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal coal;
+ unsigned int grp_idx;
+
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+ coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+ coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+ if (!bnapi->rx_ring)
+ return -ENODEV;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+ bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+ grp_idx = bnapi->index;
+ req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+ return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+ HWRM_CMD_TIMEOUT);
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
@@ -4746,11 +5060,60 @@ func_qcfg_exit:
return rc;
}
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -EIO;
+ goto hwrm_func_resc_qcaps_exit;
+ }
+
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->vf_resv_strategy =
+ le16_to_cpu(resp->vf_reservation_strategy);
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+ }
+hwrm_func_resc_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
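
/* HWRM_FUNC_RESOURCE_QCAPS reports a min/max range per resource type
 * instead of a single maximum, so the driver can reserve anywhere in
 * that range.  On the PF, vf_resv_strategy records whether firmware
 * pre-reserves maximal or minimal resources per VF; unknown values are
 * treated as maximal.
 */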
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u32 flags;

bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -4760,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;

- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;

bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!hw_resc->max_hw_ring_grps)
+ hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4777,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!pf->max_hw_ring_grps)
- pf->max_hw_ring_grps = pf->max_tx_rings;
- pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- pf->max_vnics = le16_to_cpu(resp->max_vnics);
- pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4795,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;

vf->fw_fid = le16_to_cpu(resp->fid);
-
- vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!vf->max_hw_ring_grps)
- vf->max_hw_ring_grps = vf->max_tx_rings;
- vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- vf->max_vnics = le16_to_cpu(resp->max_vnics);
- vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
@@ -4824,6 +5175,21 @@ hwrm_func_qcaps_exit:
return rc;
}

+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+ return 0;
+}
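
/* HWRM_FUNC_RESOURCE_QCAPS exists from spec 1.8.3 (0x10803), and the
 * new resource manager (BNXT_FLAG_NEW_RM) is enabled only when that
 * query also succeeds; older firmware keeps the legacy max-based
 * accounting.
 */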
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -4893,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
- bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
- resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
- if (resp->hwrm_intf_maj < 1) {
+ bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ if (resp->hwrm_intf_maj_8b < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd);
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
- resp->hwrm_fw_rsvd);
+ resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+ resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
bp->chip_num = le16_to_cpu(resp->chip_num);
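
/* bp->hwrm_spec_code packs the three 8-bit interface version fields so
 * that spec levels compare numerically; e.g. interface 1.8.3 -> 0x10803
 * and 1.9.0 -> 0x10900.  A minimal, self-contained sketch (illustration
 * only; pack_spec() is not a driver symbol):
 */
#if 0
#include <stdio.h>

static unsigned int pack_spec(unsigned int maj, unsigned int min,
			      unsigned int upd)
{
	return maj << 16 | min << 8 | upd;
}

int main(void)
{
	printf("0x%x\n", pack_spec(1, 8, 3));	/* prints 0x10803 */
	printf("0x%x\n", pack_spec(1, 9, 0));	/* prints 0x10900 */
	return 0;
}
#endif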
@@ -5045,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
return rc;
}

+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+ req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+ if (size == 128)
+ req.cache_linesize =
+ FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
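
/* Aligns the device's DMA writes with the host cache line: 64 bytes by
 * default, 128 when cache_line_size() reports 128 (e.g. on some arm64
 * and powerpc systems).  PF only, and only on firmware that understands
 * CACHE_LINESIZE (spec >= 1.8.3); failures are mapped to -EIO.
 */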
+
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5180,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
- if (bp->tx_reserved_rings != bp->tx_nr_rings) {
- int tx = bp->tx_nr_rings;
-
- if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
- tx < bp->tx_nr_rings) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
}

rc = bnxt_hwrm_ring_alloc(bp);
@@ -5408,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_rsscos_ctxs;
-#endif
- return bp->pf.max_rsscos_ctxs;
+ return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_vnics;
-#endif
- return bp->pf.max_vnics;
+ return bp->hw_resc.max_vnics;
}

#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_stat_ctxs;
-#endif
- return bp->pf.max_stat_ctxs;
+ return bp->hw_resc.max_stat_ctxs;
}

void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_stat_ctxs = max;
- else
-#endif
- bp->pf.max_stat_ctxs = max;
+ bp->hw_resc.max_stat_ctxs = max;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_cp_rings;
-#endif
- return bp->pf.max_cp_rings;
+ return bp->hw_resc.max_cp_rings;
}

void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_cp_rings = max;
- else
-#endif
- bp->pf.max_cp_rings = max;
+ bp->hw_resc.max_cp_rings = max;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return min_t(unsigned int, bp->vf.max_irqs,
- bp->vf.max_cp_rings);
-#endif
- return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_irqs = max_irqs;
- else
-#endif
- bp->pf.max_irqs = max_irqs;
+ bp->hw_resc.max_irqs = max_irqs;
}

static int bnxt_init_msix(struct bnxt *bp)
@@ -5581,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}

+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+ int orig_cp = bp->hw_resc.resv_cp_rings;
+ int tcs = netdev_get_num_tc(bp->dev);
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+ rc = __bnxt_reserve_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+ return rc;
+ }
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ }
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ netdev_err(bp->dev, "tx ring reservation failure\n");
+ netdev_reset_tc(bp->dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ return -ENOMEM;
+ }
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ return 0;
+}
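
/* If the reservation grew the completion ring count under the new
 * resource manager, the existing MSI-X allocation no longer matches,
 * so interrupt mode is torn down and re-initialized.  A TC config that
 * no longer divides evenly into the reserved TX rings is dropped
 * rather than left inconsistent.
 */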
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -5715,7 +6091,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;

for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
bp->bnapi[i]->in_reset = false;
+
+ if (bp->bnapi[i]->rx_ring) {
+ INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+ cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
napi_enable(&bp->bnapi[i]->napi);
}
}
@@ -6325,6 +6707,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_setup_int_mode(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6460,23 +6846,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}

-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init)
{
- int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
- if (bp->sriov_cfg) {
- rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
- !bp->sriov_cfg,
- BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
- }
-
/* Close the VF-reps before closing PF */
if (BNXT_PF(bp))
bnxt_vf_reps_close(bp);
-#endif
+
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
@@ -6499,6 +6875,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_del_napi(bp);
}
bnxt_free_mem(bp, irq_re_init);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ __bnxt_close_nic(bp, irq_re_init, link_re_init);
return rc;
}
@@ -6778,13 +7170,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
- netdev_warn(bp->dev,
- "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(max_rss_ctxs - 1, max_vnics - 1));
+ if (bp->rx_nr_rings > 1)
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}

- return true;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return true;
+
+ if (vnics == bp->hw_resc.resv_vnics)
+ return true;
+
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ if (vnics <= bp->hw_resc.resv_vnics)
+ return true;
+
+ netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ return false;
#else
return false;
#endif
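
/* NTUPLE/RFS needs one VNIC per RX ring plus the default VNIC.  Under
 * the new resource manager the only reliable capability check is to
 * attempt the reservation: if firmware grants enough VNICs, RFS is
 * usable; otherwise the reservation falls back to a single VNIC.
 */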
@@ -7119,7 +7524,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
- int rc;
+ int rx_rings = rx;
+ int cp, rc;

if (tcs)
tx_sets = tcs;
@@ -7135,7 +7541,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;

- return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
}
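
/* The completion ring estimate mirrors the shared/non-shared split
 * used elsewhere in the driver: shared mode pairs one completion ring
 * with an RX/TX pair, so cp = max(tx, rx); otherwise cp = tx + rx.
 * With aggregation rings each RX ring consumes two hardware RX rings,
 * hence the "rx_rings <<= 1".  For example, tx = rx = 8 in shared mode
 * gives cp = 8, and rx_rings = 16 when aggregation is enabled.
 */
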
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7740,12 +8149,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- /* In SRIOV each PF-pool (PF + child VFs) serves as a
- * switching domain, the PF's perm mac-addr can be used
- * as the unique parent-id
- */
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+ attr->u.ppid.id_len = sizeof(bp->switch_id);
+ memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
break;
default:
return -EOPNOTSUPP;
@@ -7890,24 +8295,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp)
{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_ring_grps = 0;

-#ifdef CONFIG_BNXT_SRIOV
- if (!BNXT_PF(bp)) {
- *max_tx = bp->vf.max_tx_rings;
- *max_rx = bp->vf.max_rx_rings;
- *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
- max_ring_grps = bp->vf.max_hw_ring_grps;
- } else
-#endif
- {
- *max_tx = bp->pf.max_tx_rings;
- *max_rx = bp->pf.max_rx_rings;
- *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
- max_ring_grps = bp->pf.max_hw_ring_grps;
- }
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
*max_cp -= 1;
*max_rx -= 2;
@@ -7972,6 +8367,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
return rc;
}
+/* In the initial default shared ring setting, each shared ring must have
+ * an RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+ bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+ bp->rx_nr_rings = bp->cp_nr_rings;
+ bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
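
/* Clamping to the smaller of the RX and TX defaults guarantees every
 * shared completion ring really has both halves of its pair; e.g. a
 * default of 8 RX / 4 TX rings is trimmed to 4/4 with 4 completion
 * rings.
 */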
+
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -7987,14 +8393,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
+ else
+ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ rc = __bnxt_reserve_rings(bp);
if (rc)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ /* Rings may have been trimmed, re-reserve the trimmed rings. */
+ if (bnxt_need_reserve_rings(bp)) {
+ rc = __bnxt_reserve_rings(bp);
+ if (rc)
+ netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ }
bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -8003,11 +8421,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
+ int rc;
+
ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return 0;
+
bnxt_hwrm_func_qcaps(bp);
- bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+ __bnxt_close_nic(bp, true, false);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ dev_close(bp->dev);
+ else
+ rc = bnxt_open_nic(bp, true, false);
+ return rc;
}
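
/* With centrally reserved rings, re-querying capabilities alone is no
 * longer enough: the NIC is closed, the interrupt mode rebuilt against
 * the new limits, and the NIC reopened.  If interrupt re-init fails the
 * device is closed; when a RoCE ULP is registered the resources are in
 * use and the operation is skipped entirely.
 */
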
static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8021,7 +8451,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
+ /* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
@@ -8233,6 +8663,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2d268fc26f5e..1989c470172c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_VERSION "1.9.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 8
+#define DRV_VER_MIN 9
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
@@ -24,6 +24,7 @@
#include <net/dst_metadata.h>
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <linux/net_dim.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -608,6 +609,17 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
+
struct bnxt_tpa_info {
void *data;
u8 *data_ptr;
@@ -672,6 +684,13 @@ struct bnxt_cp_ring_info {
u32 cp_raw_cons;
void __iomem *cp_doorbell;
+ struct bnxt_coal rx_ring_coal;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 event_ctr;
+
+ struct net_dim dim;
+
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -757,19 +776,38 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};

-#if defined(CONFIG_BNXT_SRIOV)
-struct bnxt_vf_info {
- u16 fw_fid;
- u8 mac_addr[ETH_ALEN];
+struct bnxt_hw_resc {
+ u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 min_cp_rings;
u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 min_rx_rings;
u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
u16 max_l2_ctxs;
- u16 max_irqs;
+ u16 min_vnics;
u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 max_irqs;
+};
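
/* One function-wide resource table replaces the per-PF/per-VF copies:
 * min/max come from HWRM_FUNC_RESOURCE_QCAPS (legacy HWRM_FUNC_QCAPS
 * fills in max only), and resv_* tracks what has actually been
 * reserved from firmware for this function.
 */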
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */
+ u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only
+ * stored by PF.
+ */
u16 vlan;
u32 flags;
#define BNXT_VF_QOS 0x1
@@ -790,15 +828,6 @@ struct bnxt_pf_info {
u16 fw_fid;
u16 port_id;
u8 mac_addr[ETH_ALEN];
- u16 max_rsscos_ctxs;
- u16 max_cp_rings;
- u16 max_tx_rings; /* HW assigned max tx rings for this PF */
- u16 max_rx_rings; /* HW assigned max rx rings for this PF */
- u16 max_hw_ring_grps;
- u16 max_irqs;
- u16 max_l2_ctxs;
- u16 max_vnics;
- u16 max_stat_ctxs;
u32 first_vf_id;
u16 active_vfs;
u16 max_vfs;
@@ -810,6 +839,9 @@ struct bnxt_pf_info {
u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
+ u8 vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -946,17 +978,6 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
-struct bnxt_coal {
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
- /* RING_IDLE enabled when coal ticks < idle_thresh */
- u16 idle_thresh;
- u8 bufs_per_record;
- u8 budget;
-};
-
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1128,6 +1149,8 @@ struct bnxt {
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+ #define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1187,7 +1210,6 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
- int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1299,6 +1321,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+ struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
@@ -1348,6 +1371,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
+ u8 switch_id[8];
struct bnxt_tc_info *tc_info;
};
@@ -1423,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct bnxt_cp_ring_info *cpr = container_of(dim,
+ struct bnxt_cp_ring_info,
+ dim);
+ struct bnxt_napi *bnapi = container_of(cpr,
+ struct bnxt_napi,
+ cp_ring);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+ bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+ dim->state = NET_DIM_START_MEASURE;
+}
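
/* The producer side lives in the driver's NAPI poll (elsewhere in this
 * patch): a sketch of how a sample is fed to net_dim, assuming the
 * net_dim API introduced with include/linux/net_dim.h in this kernel:
 *
 *	struct net_dim_sample dim_sample;
 *
 *	net_dim_sample(cpr->event_ctr, cpr->rx_packets, cpr->rx_bytes,
 *		       &dim_sample);
 *	net_dim(&cpr->dim, dim_sample);
 *
 * When the algorithm picks a new profile it schedules dim.work, which
 * lands here and writes the chosen ticks/bufs to the ring via
 * bnxt_hwrm_set_ring_coal().
 */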
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fe7599f404bf..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
+ coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
int rc = 0;
u16 mult;
+ if (coal->use_adaptive_rx_coalesce) {
+ bp->flags |= BNXT_FLAG_DIM;
+ } else {
+ if (bp->flags & BNXT_FLAG_DIM) {
+ bp->flags &= ~(BNXT_FLAG_DIM);
+ goto reset_coalesce;
+ }
+ }
+
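	/* Turning adaptive RX off jumps to reset_coalesce so the stored
	 * static settings are re-applied to the rings without being
	 * overwritten by this request; turning it on leaves the DIM
	 * worker in charge of the RX timer values.
	 */
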
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
update_stats = true;
}
+reset_coalesce:
if (netif_running(dev)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0880e4..82d17f8cc0db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 3
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
-#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
-#define HWRM_VERSION_STR "1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_ENGINE_CKV_HELLO 0x12dUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ u8 reserved2[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ u8 unused_1[5];
+ u8 valid;
+};
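
/* The original 8-bit version fields are now explicitly suffixed _8b and
 * wider 16-bit hwrm_intf_major/..._patch fields are appended at the end,
 * which is why bnxt_hwrm_ver_get() above switched to the *_8b names.
 */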
+
+/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- __le16 len;
- __le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
- __le32 unused_2;
-};
-
-/* HWRM Completion Record (16 bytes) */
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* HWRM Forwarded Request (16 bytes) */
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused_0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* HWRM Forwarded Response (16 bytes) */
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
-};
-
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+};
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
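The enforcement-policy constants above are pre-shifted into bits 16..23, so a
decoder compares the masked field against them directly instead of shifting
down first; a standalone sketch (abbreviated names, values copied from the
additions above):

#include <stdint.h>

#define POLICY_MASK       0xff0000UL    /* ..._ENFORCEMENT_POLICY_MASK */
#define POLICY_NONE       (0x0UL << 16)
#define POLICY_DISABLETX  (0x1UL << 16)
#define POLICY_WARNINGMSG (0x2UL << 16)
#define POLICY_PWRDOWN    (0x3UL << 16)

static const char *policy_name(uint32_t data1)
{
        switch (data1 & POLICY_MASK) {  /* masked, still shifted */
        case POLICY_NONE:       return "none";
        case POLICY_DISABLETX:  return "disable tx";
        case POLICY_WARNINGMSG: return "warning message";
        case POLICY_PWRDOWN:    return "power down";
        default:                return "unknown";
        }
}
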
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
-};
-
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 hwrm_intf_rsvd;
- u8 hwrm_fw_maj;
- u8 hwrm_fw_min;
- u8 hwrm_fw_bld;
- u8 hwrm_fw_rsvd;
- u8 mgmt_fw_maj;
- u8 mgmt_fw_min;
- u8 mgmt_fw_bld;
- u8 mgmt_fw_rsvd;
- u8 netctrl_fw_maj;
- u8 netctrl_fw_min;
- u8 netctrl_fw_bld;
- u8 netctrl_fw_rsvd;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- u8 roce_fw_maj;
- u8 roce_fw_min;
- u8 roce_fw_bld;
- u8 roce_fw_rsvd;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- __le32 reserved2[4];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 init_pending;
- #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
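The regenerated comments state each message size as bits/bytes. For
hwrm_func_reset_input the fields sum to 2+2+2+2+8+4+2+1+1 = 24 bytes
(192 bits) with no padding; a standalone sketch can check that at compile
time, using fixed-width stand-ins for the kernel's __le*/u8 types:

#include <stdint.h>

struct func_reset_input_sketch {
        uint16_t req_type;
        uint16_t cmpl_ring;
        uint16_t seq_id;
        uint16_t target_id;
        uint64_t resp_addr;          /* naturally aligned at offset 8 */
        uint32_t enables;
        uint16_t vf_id;
        uint8_t  func_reset_level;
        uint8_t  unused_0;
};

_Static_assert(sizeof(struct func_reset_input_sketch) == 24,
               "the size:192b/24B annotation should match the layout");
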
+/* hwrm_func_getfid_input (size:192b/24B) */
struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 unused_0[4];
+};
+
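hwrm_func_vf_cfg_input grows from 32 to 56 bytes in this revision, adding
per-resource counts plus matching *_ASSETS_TEST flags. A request-building
sketch (standalone; the reading of the assets-test flags as "check
availability without committing" is an assumption taken from their names):

#include <stdint.h>

#define ENABLES_NUM_TX_RINGS 0x40UL  /* FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS */
#define FLAGS_TX_ASSETS_TEST 0x1UL   /* FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST */

struct vf_cfg_req_sketch {
        uint32_t enables;
        uint32_t flags;
        uint16_t num_tx_rings;
};

static void request_tx_rings(struct vf_cfg_req_sketch *req, uint16_t n)
{
        req->enables     |= ENABLES_NUM_TX_RINGS; /* num_tx_rings is valid */
        req->flags       |= FLAGS_TX_ASSETS_TEST; /* assumed: test only */
        req->num_tx_rings = n;                    /* little-endian on the wire */
}
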
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_0;
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ u8 unused_2[7];
+ u8 valid;
+};
+
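min_bw and max_bw pack a 28-bit value (bits 0..27), a scale bit (bit 28,
bits vs. bytes) and a 3-bit unit (bits 29..31). A decoding sketch
(standalone; the decimal kilo/mega/giga multipliers are an assumption taken
from the unit names):

#include <stdint.h>

#define BW_VALUE_MASK  0xfffffffUL   /* bits 0..27 */
#define BW_SCALE_BYTES 0x10000000UL  /* bit 28 set: value is in bytes */
#define BW_UNIT_MASK   0xe0000000UL  /* bits 29..31 */
#define BW_UNIT_MEGA   (0x0UL << 29)
#define BW_UNIT_KILO   (0x2UL << 29)
#define BW_UNIT_BASE   (0x4UL << 29)
#define BW_UNIT_GIGA   (0x6UL << 29)

/* Returns the rate in bits/s or bytes/s depending on the scale bit;
 * the percent and invalid unit encodings are left to the caller. */
static uint64_t bw_decode(uint32_t bw)
{
        uint64_t v = bw & BW_VALUE_MASK;

        switch (bw & BW_UNIT_MASK) {
        case BW_UNIT_BASE: return v;
        case BW_UNIT_KILO: return v * 1000ULL;
        case BW_UNIT_MEGA: return v * 1000000ULL;
        case BW_UNIT_GIGA: return v * 1000000000ULL;
        default:           return 0;  /* percent1_100 / invalid */
        }
}
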
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_2;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_3;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- __le32 unused_4;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
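Note the mixed endianness in this message: stag_tpid and ctag_tpid are
__be16 (network order) while everything else is little-endian. A sketch of
filling the S-TAG fields portably, with <arpa/inet.h> htons() standing in
for the kernel's byte-order helpers:

#include <stdint.h>
#include <arpa/inet.h>

struct vlan_cfg_sketch {
        uint16_t stag_vid;   /* __le16 on the wire */
        uint16_t stag_tpid;  /* __be16 on the wire */
};

static void fill_stag(struct vlan_cfg_sketch *req, uint16_t vid)
{
        req->stag_vid  = vid;           /* assumes a little-endian host;
                                           kernel code uses cpu_to_le16() */
        req->stag_tpid = htons(0x88a8); /* 802.1ad S-TAG TPID */
}
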
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- __le16 mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
- __le16 num_mcast_filters;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 num_mcast_filters;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0;
- u8 unused_1;
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- __le16 unused_1;
- __le32 timestamp;
- __le32 unused_2;
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
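vf_req_fwd and async_event_fwd are 8-word (256-bit) bitmaps. A sketch of the
conventional indexing — word id/32, bit id%32 — using the VF_CFG_CHANGE
event id (0x33) defined above; the exact bit-to-event mapping is an
assumption here:

#include <stdint.h>

static void async_event_fwd_enable(uint32_t fwd[8], unsigned int event_id)
{
        fwd[event_id / 32] |= 1U << (event_id % 32);
}

/* e.g. async_event_fwd_enable(fwd, 0x33) sets bit 19 of word 1,
 * requesting forwarding of VF configuration change events. */
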
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- __le32 unused_0;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0;
- u8 unused_1;
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- __le16 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2;
- u8 unused_3;
- __le32 tx_lpi_timer;
- __le32 unused_4;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2[2];
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le32 unused_3;
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- u8 unused_0;
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- u8 unused_1;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_2[7];
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- u8 unused_0;
- __le16 unused_1;
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_error;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
-};
-
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 unused_0;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* Output (80 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused_0[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- u8 led0_group_id;
- u8 unused_1;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- u8 led1_group_id;
- u8 unused_2;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- u8 led2_group_id;
- u8 unused_3;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- u8 led3_group_id;
- u8 unused_4;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- __le16 unused_0;
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 valid;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 valid;
};
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- __le16 port_id;
- __le16 unused_0;
+ __le16 port_id;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
+
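(Editor's note, not part of the patch: each min_bw/max_bw word above packs three fields into one __le32 — a 28-bit bandwidth value in bits 27:0, a scale select in bit 28, and a 3-bit unit code in bits 31:29. A minimal sketch of unpacking one such response word with the QUEUE_ID1 macros defined above follows; bnxt_bw_value() and bnxt_bw_unit() are hypothetical helper names used only for illustration, assuming the usual kernel le32_to_cpu() helper is in scope.)

static u32 bnxt_bw_value(__le32 bw_word)
{
	u32 bw = le32_to_cpu(bw_word);

	/* Bits 27:0 carry the bandwidth magnitude (SFT is 0). */
	return (bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK) >>
	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT;
}

static u32 bnxt_bw_unit(__le32 bw_word)
{
	u32 bw = le32_to_cpu(bw_word);

	/* Bits 31:29 select the unit (MEGA, KILO, BASE, GIGA, PERCENT1_100, ...);
	 * compare the result against the *_BW_VALUE_UNIT_* encodings above.
	 */
	return bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK;
}
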
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_1[5];
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
};
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
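
Each per-queue min/max bandwidth word in the cos2bw request above packs a 28-bit bandwidth value (bits 0-27), a scale bit (bit 28), and a 3-bit unit selector (bits 29-31) into a single little-endian 32-bit field. A minimal sketch of composing such a word from the queue_id7 macros defined above; the helper name is illustrative, not part of the driver, and it assumes this header's context:

static __le32 bnxt_cos2bw_min_bw_pct(u32 pct)
{
	/* 28-bit bandwidth value in bits 0-27 (SFT is 0 here, shown
	 * for symmetry with the other shifted fields) ...
	 */
	u32 bw = (pct << QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT) &
		 QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK;

	/* ... plus the unit selector in bits 29-31: percent, in
	 * 1/100th-percent units.
	 */
	bw |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;

	return cpu_to_le32(bw);	/* the wire format is little endian */
}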
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
};
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
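
A hedged sketch of how a driver issues this request/response pair. It assumes the bnxt driver's existing helpers of this era (bnxt_hwrm_cmd_hdr_init(), _hwrm_send_message(), HWRM_CMD_TIMEOUT, and the preallocated response buffer at bp->hwrm_cmd_resp_addr), so treat it as illustrative rather than the driver's exact code:

	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_alloc_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
	req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);	/* default VNIC */

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fw_vnic_id = le32_to_cpu(resp->vnic_id);	/* firmware-assigned id */
	mutex_unlock(&bp->hwrm_cmd_lock);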
+
+/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
};
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- u8 unused_0;
- u8 unused_1;
- __le32 max_agg_timer;
- __le32 min_agg_len;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ u8 unused_0[7];
+ u8 valid;
+};
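
The tpa_cfg flags and enables above drive the device's TPA/GRO aggregation setup. A sketch of a typical request, under the same helper assumptions as the hwrm_vnic_alloc example; the aggregation limits chosen here are illustrative, not the driver's tuned values:

	struct hwrm_vnic_tpa_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
	req.vnic_id = cpu_to_le16(fw_vnic_id);
	req.flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
				VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
				VNIC_TPA_CFG_REQ_FLAGS_GRO);
	/* only the limits named in enables are consumed by firmware */
	req.enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				  VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
	req.max_agg_segs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8);
	req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_16);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);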
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- __le32 unused_0;
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- __le16 unused_1[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ u8 unused_0[4];
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 unused_1[6];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 unused_1;
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- u8 unused_2;
- u8 unused_3;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- u8 unused_4;
- u8 unused_5;
- __le32 reserved1;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- u8 unused_6;
- u8 unused_7;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0[3];
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_1[2];
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_2[2];
+ __le32 reserved1;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 unused_3;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- u8 unused_8[3];
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 unused_4[3];
};
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0[3];
+ u8 valid;
+};
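
Like every response structure in this file, hwrm_ring_alloc_output ends in a valid byte that firmware writes last, after the rest of the response has been DMAed to the host; a driver polls it before trusting the payload. A minimal sketch under that assumption (the helper name is hypothetical):

/* Returns true once the DMAed response is complete.  Assumes resp
 * points at the coherent response buffer for this command.
 */
static bool hwrm_ring_alloc_resp_ready(struct hwrm_ring_alloc_output *resp)
{
	if (!READ_ONCE(resp->valid))	/* firmware sets this byte last */
		return false;
	dma_rmb();	/* order the valid read before payload reads */
	return true;
}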
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[6];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
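/*
 * Editorial sketch, not part of this patch: a minimal ring-reset
 * request under the new layout. ring_type selects one of the ring
 * classes enumerated above; note the trailing pad is now u8
 * unused_1[4] rather than a __le32.
 */
static void example_build_ring_reset(struct hwrm_ring_reset_input *req,
				     u16 ring_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_RING_RESET);
	req->ring_type = RING_RESET_REQ_RING_TYPE_TX;
	req->ring_id = cpu_to_le16(ring_id);
}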
+/* hwrm_ring_reset_output (size:128b/16B) */
struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
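/*
 * Editorial note, not part of this patch: the terse ring-group fields
 * are, in driver usage, cr = completion ring ID, rr = RX ring ID,
 * ar = aggregation ring ID and sc = statistics context ID bound to
 * the group.
 */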
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- u8 l2_addr[6];
- u8 unused_0;
- u8 unused_1;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_2;
- u8 unused_3;
- u8 t_l2_addr[6];
- u8 unused_4;
- u8 unused_5;
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- u8 unused_6;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_7;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- u8 unused_8;
- __le32 unused_9;
- __le64 l2_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0[2];
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
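/*
 * Editorial sketch, not part of this patch: allocating an RX-path
 * unicast L2 filter. Every field the firmware should honor must have
 * its bit set in 'enables'; the l2_filter_id in the response is the
 * handle later passed to the _cfg/_free commands.
 */
static void example_build_l2_filter(struct hwrm_cfa_l2_filter_alloc_input *req,
				    const u8 *mac, u16 dst_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_L2_FILTER_ALLOC);
	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
				   CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
				   CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID);
	memcpy(req->l2_addr, mac, ETH_ALEN);
	memset(req->l2_addr_mask, 0xff, ETH_ALEN);	/* exact match */
	req->dst_id = cpu_to_le16(dst_id);
}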
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- __le32 unused_0;
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ u8 unused_0[7];
};
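/*
 * Editorial sketch, not part of this patch: programming the VNIC RX
 * mask. Promiscuous mode is a superset of the other receive classes,
 * so the multicast table (mc_tbl_addr/num_mc_entries) can stay zero
 * when it is requested.
 */
static void example_build_rx_mask(struct hwrm_cfa_l2_set_rx_mask_input *req,
				  u32 vnic_id, bool promisc)
{
	u32 mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if (promisc)
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_L2_SET_RX_MASK);
	req->vnic_id = cpu_to_le32(vnic_id);
	req->mask = cpu_to_le32(mask);
}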
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
};
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+};
+
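/*
 * Editorial sketch, not part of this patch: the 72-byte VXLAN encap
 * template above travels inside the encap_data[20] payload (80 bytes)
 * of the encap-record request. The template's on-wire fields are
 * big-endian (__be*) while the HWRM framing stays little-endian.
 */
static void example_build_vxlan_encap(struct hwrm_cfa_encap_record_alloc_input *req,
				      const struct hwrm_cfa_encap_data_vxlan *tmpl)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_ENCAP_RECORD_ALLOC);
	req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
	memcpy(req->encap_data, tmpl, sizeof(*tmpl));
}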
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- u8 unused_0;
- __le16 unused_1;
- __le32 encap_data[20];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ u8 unused_0[7];
};
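/*
 * Editorial sketch, not part of this patch: a TCP/IPv4 destination
 * steering filter anchored to an existing L2 filter. The __be* match
 * fields take network byte order straight from the packet headers;
 * only dst_ipaddr[0] is used for an IPv4 address.
 */
static void example_build_ntuple(struct hwrm_cfa_ntuple_filter_alloc_input *req,
				 __le64 l2_filter_id, __be32 dst_ip,
				 __be16 dst_port, u16 dst_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_NTUPLE_FILTER_ALLOC);
	req->enables = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
	req->l2_filter_id = l2_filter_id;
	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
	req->dst_ipaddr[0] = dst_ip;
	req->dst_port = dst_port;
	req->dst_id = cpu_to_le16(dst_id);
}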
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 unused_0;
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ u8 unused_0[4];
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- __le16 unused_1[3];
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2;
- u8 unused_3;
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- u8 unused_4;
- u8 unused_5;
- u8 unused_6[3];
- u8 unused_7;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 unused_0;
+};
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- __le16 unused_0;
-};
-
-/* Output (176 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+};
+
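/*
 * Editorial sketch, not part of this patch: the stats query batches up
 * to ten flow handles per request; packet_N/byte_N in the 176-byte
 * response line up positionally with flow_handle_N. flow_handle_0..9
 * are consecutive __le16 fields, so a bounded copy fills the batch.
 */
static void example_build_flow_stats(struct hwrm_cfa_flow_stats_input *req,
				     const __le16 *handles, unsigned int n)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_FLOW_STATS);
	if (n > 10)		/* hardware limit per request */
		n = 10;
	req->num_flows = cpu_to_le16(n);
	memcpy(&req->flow_handle_0, handles, n * sizeof(*handles));
}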
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- __le32 unused_0;
- char vfr_name[32];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
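/*
 * Editorial sketch, not part of this patch: creating a VF-representor
 * pairing. The response carries rx_cfa_code/tx_cfa_action, the tags
 * the switchdev path uses to demux representor traffic. Note the
 * request grew from 32 to 56 bytes in this revision.
 */
static void example_build_vfr_alloc(struct hwrm_cfa_vfr_alloc_input *req,
				    u16 vf_id, const char *name)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_CFA_VFR_ALLOC);
	req->vf_id = cpu_to_le16(vf_id);
	strscpy(req->vfr_name, name, sizeof(req->vfr_name));
}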
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __be16 tunnel_dst_port_val;
- __be32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ u8 unused_1[4];
+};
+
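/*
 * Editorial sketch, not part of this patch: advertising the VXLAN UDP
 * destination port so RX parsing recognizes the tunnel.
 * tunnel_dst_port_val is big-endian, i.e. the port exactly as it
 * appears on the wire.
 */
static void example_build_tunnel_port(struct hwrm_tunnel_dst_port_alloc_input *req,
				      __be16 udp_port)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_TUNNEL_DST_PORT_ALLOC);
	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req->tunnel_dst_port_val = udp_port;
}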
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __le16 tunnel_dst_port_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
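
/*
 * Sketch of the response "valid" convention shared by every *_output
 * structure in this file: firmware DMAs the response into host memory
 * and writes the final byte (valid) last, so the host polls that byte
 * before trusting any other response field.  The busy-wait below is a
 * simplified stand-in for the driver's real completion path.
 */
#include <stdint.h>
typedef uint8_t u8;

static int wait_resp_valid(const volatile u8 *resp, unsigned int resp_len,
                           unsigned long spins)
{
        const volatile u8 *valid = resp + resp_len - 1; /* last byte */

        while (spins--) {
                if (*valid)
                        return 0;    /* rest of the response is stable */
        }
        return -1;                   /* timed out */
}
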
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
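
/*
 * Sketch: tx_port_stats (and rx_port_stats below) are flat arrays of
 * little-endian u64 counters, so a stats dump can address them by
 * index/offset instead of naming every field; the driver's ethtool
 * path relies on the same property.  Generic stand-in, not in-tree
 * code; the identity conversion assumes a little-endian host.
 */
#include <stddef.h>
#include <stdint.h>
typedef uint64_t __le64;
#define le64_to_cpu(x) (x)

static uint64_t port_stat_at(const void *stats_blk, size_t idx)
{
        const __le64 *ctr = stats_blk;

        return le64_to_cpu(ctr[idx]);        /* idx 0 == first counter */
}
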
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 host_idx;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ u8 host_idx;
+ u8 unused_0[5];
+};
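
/*
 * Sketch: filling the fw_reset payload.  Only the bytes after the
 * standard 16-byte request header (req_type .. resp_addr) are shown;
 * the common send path owns the header.  Field meanings are read off
 * the defines above; treat the SELFRSTPCIERST interpretation ("self
 * reset on the next PCIe reset") as an assumption from the name.
 */
#include <stdint.h>
#include <string.h>
typedef uint8_t u8;

struct fw_reset_payload {            /* payload view of the input */
        u8 embedded_proc_type;       /* which embedded processor */
        u8 selfrst_status;           /* requested self-reset level */
        u8 host_idx;
        u8 unused_0[5];
};
#define PROC_TYPE_CHIP  0x6          /* ..._EMBEDDED_PROC_TYPE_CHIP */
#define SELFRST_PCIERST 0x2          /* ..._SELFRST_STATUS_SELFRSTPCIERST */

static void fill_chip_reset(struct fw_reset_payload *p)
{
        memset(p, 0, sizeof(*p));
        p->embedded_proc_type = PROC_TYPE_CHIP;
        p->selfrst_status = SELFRST_PCIERST;
}
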
+
+/* hwrm_fw_reset_output (size:128b/16B) */
struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
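
/*
 * Sketch: iterating chained structured-data blocks.  Each block begins
 * with an hwrm_struct_hdr; the sketch assumes next_offset is the byte
 * distance from this header to the next one, with
 * STRUCT_HDR_NEXT_OFFSET_LAST (0) terminating the chain.  Bounds
 * checking is deliberately minimal.
 */
#include <stddef.h>
#include <stdint.h>
typedef uint8_t u8;
typedef uint16_t __le16;
#define le16_to_cpu(x) (x)           /* little-endian host assumed */

struct hdr_view {                    /* prefix of hwrm_struct_hdr */
        __le16 struct_id;
        __le16 len;
        u8     version;
        u8     count;
        __le16 subtype;
        __le16 next_offset;
};

static void walk_structs(const u8 *buf, size_t buf_len,
                         void (*cb)(const struct hdr_view *))
{
        size_t off = 0;

        while (off + sizeof(struct hdr_view) <= buf_len) {
                const struct hdr_view *h = (const void *)(buf + off);
                uint16_t next = le16_to_cpu(h->next_offset);

                cb(h);
                if (next < sizeof(struct hdr_view))
                        break;       /* 0 == NEXT_OFFSET_LAST, or bogus */
                off += next;
        }
}
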
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- u8 count;
- u8 unused_0;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ u8 count;
+ u8 unused_0;
};
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- u8 unused_0;
- __le32 unused_1;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- __le32 unused_2;
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
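
/*
 * Sketch of the "enables" convention: optional request fields are
 * honored only when the matching WOL_FILTER_ALLOC_REQ_ENABLES_* bit is
 * set.  A magic-packet filter needs just the MAC address.  Trimmed
 * payload view with stand-in typedefs; little-endian host assumed.
 */
#include <stdint.h>
#include <string.h>
typedef uint8_t u8;
typedef uint16_t __le16;
typedef uint32_t __le32;
#define cpu_to_le16(x) (x)
#define cpu_to_le32(x) (x)

#define ENABLES_MAC_ADDRESS 0x1      /* ..._ENABLES_MAC_ADDRESS */
#define WOL_TYPE_MAGICPKT   0x0      /* ..._WOL_TYPE_MAGICPKT */

struct wol_alloc_view {              /* payload through mac_address */
        __le32 flags;
        __le32 enables;
        __le16 port_id;
        u8     wol_type;
        u8     unused_0[5];
        u8     mac_address[6];
};

static void fill_magic_wol(struct wol_alloc_view *req, uint16_t port,
                           const u8 mac[6])
{
        memset(req, 0, sizeof(*req));
        req->enables  = cpu_to_le32(ENABLES_MAC_ADDRESS);
        req->port_id  = cpu_to_le16(port);
        req->wol_type = WOL_TYPE_MAGICPKT;
        memcpy(req->mac_address, mac, 6);
}
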
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
};
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- __le32 unused_0;
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3[3];
- u8 unused_4;
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- __le16 unused_5[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- __le16 unused_4[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- u8 wol_pkt_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
};
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:384b/48B) */
struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- __le32 dir_item_length;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ __le32 dir_item_length;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
};
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
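
/*
 * Sketch: mapping a command-specific nvm_write error code to text.
 * Only the value space defined above is illustrated; how the code byte
 * is extracted from a failed response is driver plumbing and left out.
 */
#include <stdint.h>

static const char *nvm_write_err_str(uint8_t code)
{
        switch (code) {
        case 0x1: return "fragment error";     /* ..._FRAG_ERR */
        case 0x2: return "no space in NVRAM";  /* ..._NO_SPACE */
        case 0x0:
        default:  return "unknown error";      /* ..._UNKNOWN  */
        }
}
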
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- u8 unused_1[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- __le16 unused_0;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
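
/*
 * Sketch: acting on nvm_install_update's reset_required field.  The
 * values mirror the defines above; the suggested actions are an
 * interpretation of the names, not text from the spec.
 */
#include <stdint.h>

enum { RESET_NONE = 0x0, RESET_PCI = 0x1, RESET_POWER = 0x2 };

static const char *post_install_action(uint8_t reset_required)
{
        switch (reset_required) {
        case RESET_PCI:   return "PCIe reset needed to activate package";
        case RESET_POWER: return "power cycle needed to activate package";
        case RESET_NONE:
        default:          return "no reset needed";
        }
}
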
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ u8 unused_0[7];
};
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ u8 unused_0;
+};
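
/*
 * Sketch of the _MASK/_SFT convention used throughout this header: a
 * multi-bit field is written as (value << SFT) & MASK and read back as
 * (flags & MASK) >> SFT.  Shown for the encrypt-mode bits of
 * nvm_set_variable's flags byte (HMAC_SHA1 == mode value 0x1).
 */
#include <stdint.h>

#define ENCRYPT_MODE_MASK 0xe        /* ..._ENCRYPT_MODE_MASK */
#define ENCRYPT_MODE_SFT  1          /* ..._ENCRYPT_MODE_SFT  */

static uint8_t set_encrypt_mode(uint8_t flags, uint8_t mode)
{
        flags &= (uint8_t)~ENCRYPT_MODE_MASK;
        flags |= (uint8_t)(mode << ENCRYPT_MODE_SFT) & ENCRYPT_MODE_MASK;
        return flags;
}

static uint8_t get_encrypt_mode(uint8_t flags)
{
        return (flags & ENCRYPT_MODE_MASK) >> ENCRYPT_MODE_SFT;
}
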
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ u8 unused_0[7];
};
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1;
- u8 unused_2;
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
- __le32 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 unused_6;
- u8 valid;
-};
-
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+ u8 unused_2[7];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 pcie_lane_num;
+ u8 unused_0[6];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
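
/*
 * Sketch: evaluating a selftest_exec response.  Reading the bitmask
 * names as "a set bit in test_success means that test passed", a test
 * failed when its bit is set in requested_tests but clear in
 * test_success.
 */
#include <stdint.h>

static uint8_t failed_tests(uint8_t requested_tests, uint8_t test_success)
{
        return requested_tests & (uint8_t)~test_success;
}
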
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 resp_data_addr;
- __le32 resp_data_offset;
- __le16 data_len;
- u8 flags;
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 total_data_len;
- __le16 copied_data_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- __le16 unused_0;
- __le16 size;
- __le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_PORT_LED_CFG (0x2dUL)
- #define HWRM_PORT_LED_QCFG (0x2eUL)
- #define HWRM_PORT_LED_QCAPS (0x2fUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_FUNC_VLAN_CFG (0x33UL)
- #define HWRM_FUNC_VLAN_QCFG (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
- #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
- #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
- #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
- #define HWRM_FW_IPC_MAILBOX (0xccUL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
- #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
- #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
- #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
- #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
- #define HWRM_CFA_VFR_ALLOC (0xfdUL)
- #define HWRM_CFA_VFR_FREE (0xfeUL)
- #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
- #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
- #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
- #define HWRM_CFA_FLOW_ALLOC (0x103UL)
- #define HWRM_CFA_FLOW_FREE (0x104UL)
- #define HWRM_CFA_FLOW_FLUSH (0x105UL)
- #define HWRM_CFA_FLOW_STATS (0x106UL)
- #define HWRM_CFA_FLOW_INFO (0x107UL)
- #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
- #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
- #define HWRM_CFA_PAIR_ALLOC (0x10dUL)
- #define HWRM_CFA_PAIR_FREE (0x10eUL)
- #define HWRM_CFA_PAIR_INFO (0x10fUL)
- #define HWRM_FW_IPC_MSG (0x110UL)
- #define HWRM_SELFTEST_QLIST (0x200UL)
- #define HWRM_SELFTEST_EXEC (0x201UL)
- #define HWRM_SELFTEST_IRQ (0x202UL)
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_DBG_ERASE_NVM (0xff15UL)
- #define HWRM_DBG_CFG (0xff16UL)
- #define HWRM_DBG_COREDUMP_LIST (0xff17UL)
- #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
- #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
- #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
- #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
- #define HWRM_NVM_FLUSH (0xfff0UL)
- #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
- #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- __le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- u8 priority;
- u8 valid;
- u8 unused_0[3];
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-#endif
+#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c9617675f934..d87faad901fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->vf = vf_id;
vf = &bp->pf.vf[vf_id];
- memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ if (is_valid_ether_addr(vf->mac_addr))
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ else
+ memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-/* only call by PF to reserve resources for VF */
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_vf_resource_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+ u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int i, rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+ vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+ else
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+ req.min_rsscos_ctx = cpu_to_le16(1);
+ req.max_rsscos_ctx = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+ req.min_cmpl_rings = cpu_to_le16(1);
+ req.min_tx_rings = cpu_to_le16(1);
+ req.min_rx_rings = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_vnics = cpu_to_le16(1);
+ req.min_stat_ctx = cpu_to_le16(1);
+ req.min_hw_ring_grps = cpu_to_le16(1);
+ } else {
+ vf_cp_rings /= num_vfs;
+ vf_tx_rings /= num_vfs;
+ vf_rx_rings /= num_vfs;
+ vf_vnics /= num_vfs;
+ vf_stat_ctx /= num_vfs;
+ vf_ring_grps /= num_vfs;
+
+ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_vnics = cpu_to_le16(vf_vnics);
+ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ }
+ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_vnics = cpu_to_le16(vf_vnics);
+ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -ENOMEM;
+ break;
+ }
+ pf->active_vfs = i + 1;
+ pf->vf[i].fw_fid = pf->first_vf_id + i;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (pf->active_vfs) {
+ u16 n = 1;
+
+ if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+ n = pf->active_vfs;
+
+ hw_resc->max_tx_rings -= vf_tx_rings * n;
+ hw_resc->max_rx_rings -= vf_rx_rings * n;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+ hw_resc->max_cp_rings -= vf_cp_rings * n;
+ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+ hw_resc->max_vnics -= vf_vnics * n;
+
+ rc = pf->active_vfs;
+ }
+ return rc;
+}
+
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
- u16 vf_ring_grps;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ max_stat_ctxs = hw_resc->max_stat_ctxs;
+
/* Remaining rings are distributed equally amongs VF's for now */
- vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
else
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
- vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
- vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
- vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+ num_vfs;
+ vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+ vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+ vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc) {
- pf->max_tx_rings -= total_vf_tx_rings;
- pf->max_rx_rings -= vf_rx_rings * num_vfs;
- pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
- pf->max_cp_rings -= vf_cp_rings * num_vfs;
- pf->max_rsscos_ctxs -= num_vfs;
- pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
- pf->max_vnics -= vf_vnics * num_vfs;
+ if (rc)
+ rc = -ENOMEM;
+ if (pf->active_vfs) {
+ hw_resc->max_tx_rings -= total_vf_tx_rings;
+ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+ hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+ hw_resc->max_rsscos_ctxs -= num_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ hw_resc->max_vnics -= vf_vnics * num_vfs;
+ rc = pf->active_vfs;
}
return rc;
}
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ else
+ return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
- avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rss_ctxs = vfs_supported;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
min_rx_rings)
rx_ok = 1;
} else {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
avail_cp >= min_tx_rings)
tx_ok = 1;
- if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+ min_rss_ctxs)
rss_ok = 1;
if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out1;
/* Reserve resources for VFs */
- rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
- if (rc)
- goto err_out2;
+ rc = bnxt_func_cfg(bp, *num_vfs);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ goto err_out2;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+ *num_vfs = rc;
+ }
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ exec_fwd_resp_exit:
return rc;
}
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+ struct hwrm_func_vf_cfg_input *req =
+ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+ /* Only allow the VF to set a valid MAC address if the PF-assigned MAC
+ * address is zero.
+ */
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+ !is_valid_ether_addr(vf->mac_addr)) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
struct hwrm_cfa_l2_filter_alloc_input *req =
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+ bool mac_ok = false;
- if (!is_valid_ether_addr(vf->mac_addr) ||
- ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ /* The VF MAC address must first match the PF MAC address, if it is
+ * valid. Otherwise, it must match the VF MAC address if firmware
+ * spec >= 1.2.2.
+ */
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+ mac_ok = true;
+ } else {
+ /* No MAC is recorded for this VF; allow the request both for
+ * firmware spec < 1.2.2 and for newer firmware.
+ */
+ mac_ok = true;
+ }
+ if (mac_ok)
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
- else
- return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
+ case HWRM_FUNC_VF_CFG:
+ rc = bnxt_vf_store_mac(bp, vf);
+ break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1d9b08c20f95..2ece1645f55d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
}
/* Is dev a VF-rep? */
- if (dev != pf_bp->dev)
+ if (bnxt_dev_is_vf_rep(dev))
return bnxt_vf_rep_get_fid(dev);
bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d188c43..2ca11be64182 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -241,6 +241,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
* happens under the rtnl_lock() this routine is safe
@@ -376,6 +381,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
ether_addr_copy(dev->dev_addr, dev->perm_addr);
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +465,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto err;
+
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe70e42..38b9a75ad724 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return false;
+}
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24b4f4ceceef..b1e35a9accf1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT)
- phy_mac_interrupt(priv->dev->phydev,
- !!(status & UMAC_IRQ_LINK_UP));
+ if (status & UMAC_IRQ_LINK_EVENT) {
+ priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ phy_mac_interrupt(priv->dev->phydev);
+ }
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
depends on 64BIT
---help---
This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config THUNDER_NIC_RGX
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT
+ imply PTP_1588_CLOCK
+ default y
+ ---help---
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+ The coprocessor provides a timestamping mechanism suitable for use
+ with the IEEE 1588 Precision Time Protocol or for other purposes.
+ Timestamps can be used in the BGX, TNS, GTI, and NIC blocks.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Cavium ethernet device drivers.
#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
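
Callers are expected to pair cavium_ptp_get() with cavium_ptp_put() and to
treat -ENODEV and -EPROBE_DEFER differently, as the nicvf probe path later in
this patch does. A minimal sketch of that usage (the NULL guard before the put
is an extra precaution, not a requirement stated by this API):

    struct cavium_ptp *ptp = cavium_ptp_get();

    if (IS_ERR(ptp)) {
            if (PTR_ERR(ptp) == -ENODEV)
                    ptp = NULL;           /* no PTP block: run without it */
            else
                    return PTR_ERR(ptp);  /* e.g. -EPROBE_DEFER: retry later */
    }

    /* ... use e.g. cavium_ptp_tstamp2time(ptp, tstamp) while holding it ... */

    if (ptp)
            cavium_ptp_put(ptp);          /* drops the PCI device reference */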
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The convention is that it
+ * represents the number of nanoseconds between cycles, held as a
+ * 64-bit fixed-point value: the upper 32 bits are whole nanoseconds
+ * and the lower 32 bits are the fractional part.
+ * scaled_ppm gives the requested correction in parts per million,
+ * carried with a 16-bit binary fractional field.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the base compensation value written initially in
+ * cavium_ptp_probe() -> tbase = 10^9 / clock_rate. The result is
+ * then written to the PTP_CLOCK_COMP register.
+ */
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
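
As a sanity check of the fixed-point arithmetic above, a standalone host-side
example in plain C (the 800 MHz clock rate and the +1 ppm request are assumed
values, not read from hardware):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t clock_rate = 800000000ull;  /* assumed 800 MHz coclk */
            int64_t scaled_ppm = 65536;          /* +1 ppm, 16-bit fraction */

            uint64_t comp = (1000000000ull << 32) / clock_rate;
            uint64_t adj = ((comp * (uint64_t)scaled_ppm) >> 16) / 1000000ull;

            /* comp = 0x140000000, i.e. 1.25 ns/cycle; adj = 5368, i.e. the
             * compensation word grows by ~1.25e-6 ns per cycle for +1 ppm.
             */
            printf("comp=%#llx adj=%llu new=%#llx\n",
                   (unsigned long long)comp, (unsigned long long)adj,
                   (unsigned long long)(comp + adj));
            return 0;
    }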
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq: request
+ * @on: is it on
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (!clock->ptp_clock) {
+ err = -ENODEV;
+ goto error_stop;
+ }
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+ return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+ pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
struct u64_stats_sync syncp;
};
+struct cavium_ptp;
+
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
@@ -312,6 +314,33 @@ struct nicvf {
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+ /* When a packet that requires timestamping is sent, the hardware
+ * inserts two entries into the completion queue. The first is the regular
+ * CQE_TYPE_SEND entry that signals that the packet was sent.
+ * The second is CQE_TYPE_SEND_PTP that contains the actual timestamp
+ * for that packet.
+ * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+ * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
+ * entry.
+ * So `ptp_skb` is used to hold the pointer to the packet between
+ * the calls to CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+ */
+ struct sk_buff *ptp_skb;
+ /* `tx_ptp_skbs` is set when the hardware is sending a packet that
+ * requires timestamping. Cavium hardware cannot process more than one
+ * such packet at once, so this is set each time the driver submits
+ * a packet that requires timestamping to the send queue, and cleared
+ * each time it receives the completion queue entry saying that such
+ * a packet was sent.
+ * So `tx_ptp_skbs` prevents the driver from submitting more than one
+ * packet that requires timestamping to the hardware at a time.
+ */
+ atomic_t tx_ptp_skbs;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
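
The claim/release protocol described above can be sketched as follows; this is
an illustration of the intended use (the release side is the
atomic_set(&nic->tx_ptp_skbs, 0) in the CQE_TYPE_SEND_PTP handler added later
in this patch), not a verbatim copy of the driver code:

    /* Transmit side, illustration only: claim the single timestamp slot
     * with an atomic 0 -> 1 transition; if it is already taken, send the
     * packet without requesting a hardware timestamp.
     */
    if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
            if (atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
            else
                    skb_shinfo(skb)->tx_flags &= ~SKBTX_HW_TSTAMP;
    }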
@@ -371,6 +400,7 @@ struct nicvf {
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -521,6 +551,11 @@ struct pfc {
u8 fc_tx;
};
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -540,6 +575,7 @@ union nic_mbx {
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
struct pfc pfc;
+ struct set_ptp ptp;
};
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..8325577d7442 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* TNS and TNS bypass modes are present only on 88xx */
+ /* TNS and TNS bypass modes are present only on 88xx.
+ * Also, the offset of this CSR has changed on 81xx and 83xx.
+ */
if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
/* Disable TNS mode on both interfaces */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
}
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
}
}
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+ /* Skip length to exclude the 8-byte timestamp while parsing the pkt;
+ * if not configured, this will result in L2 errors.
+ */
+ pkind->hdr_sl = 4;
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
goto unlock;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..ed9f10bdf41e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,12 +9,14 @@
/* ETHTOOL Support for VNIC_VF Device*/
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
#define DRV_NAME "thunder-nicvf"
#define DRV_VERSION "1.0"
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
return 0;
}
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
.set_pauseparam = nicvf_set_pauseparam,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = nicvf_get_ts_info,
.get_link_ksettings = nicvf_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 21618d0d694f..881af8a120f5 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,11 +20,13 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
#define DRV_NAME "thunder-nicvf"
#define DRV_VERSION "1.0"
@@ -602,6 +604,44 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return false;
}
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for timestamp requested skb */
+ if (!nic->ptp_skb)
+ return;
+
+ /* Check if timestamping timed out (the timeout is set to 10us) */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
int budget, int *subdesc_cnt,
@@ -658,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -697,6 +742,21 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
skb_set_hash(skb, hash, hash_type);
}
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
+
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
struct cqe_rx_t *cqe_rx,
@@ -748,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
+ nicvf_set_rxtstamp(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -823,10 +884,12 @@ loop:
&tx_pkts, &tx_bytes);
tx_done++;
break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
- case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
@@ -1322,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
nicvf_free_cq_poll(nic);
+ /* Free any pending SKB saved to receive timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
/* Clear multiqset info */
nic->pnicvf = nic;
return 0;
}
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
@@ -1397,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
@@ -1823,6 +1908,73 @@ static void nicvf_xdp_flush(struct net_device *dev)
return;
}
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
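
The new SIOCSHWTSTAMP handler is exercised from user space through the standard hwtstamp_config ioctl. A minimal, hedged usage example (the interface name passed in is an assumption; any socket fd works as the ioctl target):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Request Tx timestamping and timestamps on all Rx packets; the driver
 * above coerces every supported rx_filter to HWTSTAMP_FILTER_ALL. */
static int enable_hw_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
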
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1836,6 +1988,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_bpf = nicvf_xdp,
.ndo_xdp_xmit = nicvf_xdp_xmit,
.ndo_xdp_flush = nicvf_xdp_flush,
+ .ndo_do_ioctl = nicvf_ioctl,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1845,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct nicvf *nic;
int err, qcount;
u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+ /* In a virtualized environment, proceed without PTP */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -1899,6 +2062,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (!nic->t88)
nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -2012,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 14e62c6ac342..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -982,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
qs_cfg->be = 1;
#endif
qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -1389,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping is not supported with TSO, so ignore the request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Finally enable timestamp generation
+ * Since 'post_cqe' is also set, two CQEs will be posted
+ * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+ */
+ hdr->tstmp = 1;
}
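
The atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1) above is the claim side of the single-slot scheme: the counter can only go 0 -> 1, so at most one packet is ever in flight for timestamping. A hedged C11 sketch of that primitive (illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int tx_ptp_skbs;   /* stand-in for nic->tx_ptp_skbs */

/* Mirrors atomic_add_unless(&v, 1, 1): add 1 unless the value is
 * already 1; returns true only when the caller won the single slot. */
static bool claim_tstamp_slot(void)
{
	int old = atomic_load(&tx_ptp_skbs);

	while (old != 1) {
		if (atomic_compare_exchange_weak(&tx_ptp_skbs, &old, old + 1))
			return true;
	}
	return false;
}
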
/* SQ GATHER subdescriptor
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..0f23999c5bcf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
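
bgx_config_timestamping() is a plain read-modify-write of one enable bit, with the CSR offset picked by LMAC type (the GMP block for SGMII/QSGMII/RGMII, the SMU block otherwise). A hedged, generic sketch of the pattern (the raw pointer access stands in for bgx_reg_read/bgx_reg_write):

#include <stdbool.h>
#include <stdint.h>

#define PKT_RX_PTP_EN (1ULL << 12)   /* mirrors BGX_PKT_RX_PTP_EN */

/* Toggle a single bit in a 64-bit CSR without disturbing its neighbours. */
static void csr_set_bit(volatile uint64_t *csr, uint64_t bit, bool enable)
{
	uint64_t cfg = *csr;

	if (enable)
		cfg |= bit;
	else
		cfg &= ~bit;
	*csr = cfg;
}
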
+
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
@@ -172,6 +174,7 @@
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..5df923798669 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,3 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
+cxgb4-$(CONFIG_ZLIB_DEFLATE) += cudbg_zlib.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff)
{
u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
if (offset + size > pdbg_buff->size)
return CUDBG_STATUS_NO_MEM;
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+ if (size > pdbg_init->compress_buff_size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_init->compress_buff;
+ pin_buff->offset = 0;
+ pin_buff->size = size;
+ return 0;
+ }
+
pin_buff->data = (char *)pdbg_buff->data + offset;
pin_buff->offset = offset;
pin_buff->size = size;
- pdbg_buff->size -= size;
return 0;
}
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff)
{
- pdbg_buff->size += pin_buff->size;
+ /* Clear compression buffer for re-use */
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+ memset(pdbg_init->compress_buff, 0,
+ pdbg_init->compress_buff_size);
+
pin_buff->data = NULL;
pin_buff->offset = 0;
pin_buff->size = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index 88e740082a02..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -87,6 +87,10 @@ struct cudbg_init {
struct adapter *adap; /* Pointer to adapter structure */
void *outbuf; /* Output buffer */
u32 outbuf_size; /* Output buffer size */
+ u8 compress_type; /* Type of compression to use */
+ void *compress_buff; /* Compression buffer */
+ u32 compress_buff_size; /* Compression buffer size */
+ void *workspace; /* Workspace for zlib */
};
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 0a3871f10787..8b95117c2923 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -23,12 +23,57 @@
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *dbg_buff)
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
{
- cudbg_update_buff(pin_buff, dbg_buff);
- cudbg_put_buff(pin_buff, dbg_buff);
+ struct cudbg_buffer temp_in_buff = { 0 };
+ int bytes_left, bytes_read, bytes;
+ u32 offset = dbg_buff->offset;
+ int rc;
+
+ temp_in_buff.offset = pin_buff->offset;
+ temp_in_buff.data = pin_buff->data;
+ temp_in_buff.size = pin_buff->size;
+
+ bytes_left = pin_buff->size;
+ bytes_read = 0;
+ while (bytes_left > 0) {
+ /* Do compression in smaller chunks */
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+ temp_in_buff.size = bytes;
+ rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+ if (rc)
+ return rc;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+ pin_buff->size = dbg_buff->offset - offset;
+ return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ int rc = 0;
+
+ if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
+ cudbg_update_buff(pin_buff, dbg_buff);
+ } else {
+ rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+ if (rc)
+ goto out;
+ }
+
+out:
+ cudbg_put_buff(pdbg_init, pin_buff);
+ return rc;
}
static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -371,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
buf_size = T5_REGMAP_SIZE;
- rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
if (rc)
return rc;
t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -395,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
}
dparams = &padap->params.devlog;
- rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
if (rc)
return rc;
@@ -410,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -436,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
}
size += sizeof(cfg);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -453,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
NULL);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -469,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
int size, rc;
size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -477,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
(u32 *)temp_buff.data,
(u32 *)((char *)temp_buff.data +
5 * CIM_MALA_SIZE));
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -490,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
struct cudbg_cim_qcfg *cim_qcfg_data;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
&temp_buff);
if (rc)
return rc;
@@ -501,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -510,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
cim_qcfg_data->obq_wr);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
cim_qcfg_data->thres);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -531,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
/* collect CIM IBQ */
qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -545,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -616,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
/* collect CIM OBQ */
qsize = cudbg_cim_obq_size(padap, qid);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -630,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -887,7 +925,7 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
bytes = min_t(unsigned long, bytes_left,
(unsigned long)CUDBG_CHUNK_SIZE);
- rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
if (rc)
return rc;
@@ -906,14 +944,19 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
skip_read:
bytes_left -= bytes;
bytes_read += bytes;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
return rc;
}
@@ -1007,18 +1050,18 @@ int cudbg_collect_rss(struct cudbg_init *pdbg_init,
int rc, nentries;
nentries = t4_chip_rss_size(padap);
- rc = cudbg_get_buff(dbg_buff, nentries * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
rc = t4_read_rss(padap, (u16 *)temp_buff.data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -1031,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
int vf, rc, vf_count;
vf_count = padap->params.arch.vfcount;
- rc = cudbg_get_buff(dbg_buff,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
vf_count * sizeof(struct cudbg_rss_vf_conf),
&temp_buff);
if (rc)
@@ -1041,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
for (vf = 0; vf < vf_count; vf++)
t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh, true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -1053,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
int rc;
- rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -1071,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
struct cudbg_pm_stats *pm_stats_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
&temp_buff);
if (rc)
return rc;
@@ -1079,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -1095,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -1104,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
for (i = 0; i < NTX_SCHED; ++i)
t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
&hw_sched_buff->ipg[i], true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -1129,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
n = n / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1218,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
tp_pio->ireg_local_offset, true);
ch_tp_pio++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -1231,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ &temp_buff);
if (rc)
return rc;
@@ -1252,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -1265,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
struct cudbg_ulprx_la *ulprx_la_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
&temp_buff);
if (rc)
return rc;
@@ -1273,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
ulprx_la_buff->size = ULPRX_LA_SIZE;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -1287,15 +1325,14 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
int size, rc;
size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
@@ -1307,7 +1344,8 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_meminfo *meminfo_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_meminfo), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ &temp_buff);
if (rc)
return rc;
@@ -1315,12 +1353,11 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
rc = cudbg_fill_meminfo(padap, meminfo_buff);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -1334,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_cim_pif_la) +
2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1343,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
(u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
NULL, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -1360,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
&temp_buff);
if (rc)
return rc;
@@ -1392,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
clk_info_buff->finwait2_timer =
tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -1408,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1449,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
pcie_pio->ireg_local_offset);
ch_pcie++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -1465,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1506,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
pm_pio->ireg_local_offset);
ch_pm++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1521,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
u32 para[2], val[2];
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_tid_info_region_rev1),
&temp_buff);
if (rc)
return rc;
@@ -1544,7 +1578,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->uotid_base = val[0];
@@ -1563,7 +1597,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->hpftid_base = val[0];
@@ -1591,8 +1625,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
#undef FW_PARAM_PFVF_A
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
@@ -1606,7 +1639,7 @@ int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1618,8 +1651,7 @@ int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
value++;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
@@ -1799,7 +1831,7 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
return CUDBG_STATUS_ENTITY_NOT_FOUND;
size = rc;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1813,7 +1845,7 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
if (!ctx_buf) {
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return -ENOMEM;
}
@@ -1876,8 +1908,7 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
*/
cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -2038,7 +2069,7 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
n = padap->params.arch.mps_tcam_size;
size = sizeof(struct cudbg_mps_tcam) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2047,7 +2078,7 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_collect_tcam_index(padap, tcam, i);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
total_size += sizeof(struct cudbg_mps_tcam);
@@ -2057,11 +2088,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
if (!total_size) {
rc = CUDBG_SYSTEM_ERROR;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -2112,7 +2142,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
&temp_buff);
if (rc)
return rc;
@@ -2128,8 +2158,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -2280,7 +2309,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
size += sizeof(struct cudbg_tcam);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2292,7 +2321,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -2303,8 +2332,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
bytes += sizeof(struct cudbg_tid_data);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -2317,13 +2345,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
int rc;
size = sizeof(u16) * NMTUS * NCCTRL_WIN;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
t4_read_cong_tbl(padap, (void *)temp_buff.data);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -2341,7 +2368,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2377,8 +2404,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
}
ma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -2391,7 +2417,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
u32 i, j;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
@@ -2412,8 +2438,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -2438,7 +2463,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
return CUDBG_STATUS_NOT_IMPLEMENTED;
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2488,14 +2513,13 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
up_cim_reg->ireg_local_offset +
(j * local_offset), local_range, buff);
if (rc) {
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
up_cim++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -2508,7 +2532,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
int i, rc;
u32 addr;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_pbt_tables),
&temp_buff);
if (rc)
return rc;
@@ -2521,7 +2546,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_dynamic[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -2534,7 +2559,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_static[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -2546,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->lrf_table[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -2558,12 +2583,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_data[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -2584,7 +2608,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
log = padap->mbox_log;
mbox_cmds = padap->mbox_log->size;
size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2607,8 +2631,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
}
mboxlog++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -2626,7 +2649,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -2644,6 +2667,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
hma_fli->ireg_local_offset);
hma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
+ CUDBG_COMPRESSION_ZLIB,
};
struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
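
A worked value for the chunking constants above: CDUMP_MAX_COMP_BUF_SIZE is 64 KB - 1 = 65535, so CUDBG_CHUNK_SIZE rounds that down to a whole number of KB, (65535 / 1024) * 1024 = 63 * 1024 = 64512 bytes per compression chunk. Checked as a compile-time assertion:

#include <assert.h>

static_assert(((64 * 1024 - 1) / 1024) * 1024 == 63 * 1024,
	      "CUDBG_CHUNK_SIZE is 64512 bytes");
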
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..4c3854cbeb6c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+ struct cudbg_buffer *pin_buff)
+{
+ if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+ pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+ pin_buff->offset = 0;
+ pin_buff->size = sizeof(struct cudbg_compress_hdr);
+ pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+ return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct z_stream_s compress_stream = { 0 };
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_compress_hdr *c_hdr;
+ int rc;
+
+ /* Write compression header to output buffer before compression */
+ rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+ if (rc)
+ return rc;
+
+ c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+ c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+ compress_stream.workspace = pdbg_init->workspace;
+ rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ compress_stream.next_in = pin_buff->data;
+ compress_stream.avail_in = pin_buff->size;
+ compress_stream.next_out = pout_buff->data + pout_buff->offset;
+ compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+ rc = zlib_deflate(&compress_stream, Z_FINISH);
+ if (rc != Z_STREAM_END)
+ return CUDBG_SYSTEM_ERROR;
+
+ rc = zlib_deflateEnd(&compress_stream);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ c_hdr->compress_size = compress_stream.total_out;
+ c_hdr->decompress_size = pin_buff->size;
+ pout_buff->offset += compress_stream.total_out;
+
+ return 0;
+}
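
cudbg_compress_buff() is the standard one-shot zlib deflate sequence run over a preallocated workspace. A hedged user-space analogue using stock zlib (no kernel workspace needed; window bits 12 and memory level 4 mirror CUDBG_ZLIB_WIN_BITS/CUDBG_ZLIB_MEM_LVL):

#include <string.h>
#include <zlib.h>

/* Compress in_len bytes into out; returns bytes produced or -1. */
static long one_shot_deflate(const unsigned char *in, unsigned long in_len,
			     unsigned char *out, unsigned long out_len)
{
	z_stream zs;

	memset(&zs, 0, sizeof(zs));
	if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			 12, 4, Z_DEFAULT_STRATEGY) != Z_OK)
		return -1;

	zs.next_in = (unsigned char *)in;
	zs.avail_in = in_len;
	zs.next_out = out;
	zs.avail_out = out_len;

	if (deflate(&zs, Z_FINISH) != Z_STREAM_END) {
		deflateEnd(&zs);
		return -1;
	}
	return deflateEnd(&zs) == Z_OK ? (long)zs.total_out : -1;
}
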
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..9d55c4c38c84
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+ u32 compress_id;
+ u64 decompress_size;
+ u64 compress_size;
+ u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+#ifdef CONFIG_ZLIB_DEFLATE
+ return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL);
+#else
+ return 0;
+#endif /* CONFIG_ZLIB_DEFLATE */
+}
+
+#ifndef CONFIG_ZLIB_DEFLATE
+static inline int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ return 0;
+}
+#else
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* CONFIG_ZLIB_DEFLATE */
+#endif /* __CUDBG_ZLIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1ff71825868c..f05b58f74c7a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -312,6 +312,7 @@ struct vpd_params {
};
struct pci_params {
+ unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
@@ -825,6 +826,10 @@ struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -839,6 +844,8 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -868,7 +875,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1305,6 +1315,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1637,6 +1648,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index a2d6c8a69c52..9e0a8a8186c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,6 +18,7 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
+#include "cudbg_zlib.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
@@ -318,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
u32 i, entity;
u32 len = 0;
+ u32 wsize;
if (flag & CXGB4_ETH_DUMP_HW) {
for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -333,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
}
}
+ /* If compression is enabled, a smaller destination buffer is enough */
+ wsize = cudbg_get_workspace_size();
+ if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+ len = CUDBG_DUMP_BUFF_SIZE;
+
return len;
}
@@ -341,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
const struct cxgb4_collect_entity *e_arr,
u32 arr_size, void *buf, u32 *tot_size)
{
- struct adapter *adap = pdbg_init->adap;
struct cudbg_error cudbg_err = { 0 };
struct cudbg_entity_hdr *entity_hdr;
- u32 entity_size, i;
- u32 total_size = 0;
+ u32 i, total_size = 0;
int ret;
for (i = 0; i < arr_size; i++) {
const struct cxgb4_collect_entity *e = &e_arr[i];
- /* Skip entities that won't fit in output buffer */
- entity_size = cxgb4_get_entity_length(adap, e->entity);
- if (entity_size >
- pdbg_init->outbuf_size - *tot_size - total_size)
- continue;
-
entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
entity_hdr->entity_type = e->entity;
entity_hdr->start_offset = dbg_buff->offset;
@@ -382,6 +381,28 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
*tot_size += total_size;
}
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+ u32 workspace_size;
+
+ workspace_size = cudbg_get_workspace_size();
+ pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+ workspace_size);
+ if (!pdbg_init->compress_buff)
+ return -ENOMEM;
+
+ pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+ pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+ CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+ return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+ if (pdbg_init->compress_buff)
+ vfree(pdbg_init->compress_buff);
+}
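
cudbg_alloc_compress_buff() carves a single vzalloc() allocation into the 4 MB scratch buffer plus the zlib workspace trailing it. A hedged user-space sketch of the same carve-one-allocation layout (calloc stands in for vzalloc; note the driver computes its workspace offset relative to CUDBG_COMPRESS_BUFF_SIZE rather than placing it strictly after the scratch area):

#include <stdint.h>
#include <stdlib.h>

struct dump_bufs {
	void *compress_buff;  /* scratch area for uncompressed chunks */
	void *workspace;      /* zlib deflate workspace */
};

/* One allocation, two regions: scratch first, workspace after it. */
static int alloc_dump_bufs(struct dump_bufs *b, size_t scratch_size,
			   size_t workspace_size)
{
	uint8_t *base = calloc(1, scratch_size + workspace_size);

	if (!base)
		return -1;
	b->compress_buff = base;
	b->workspace = base + scratch_size;
	return 0;
}
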
+
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag)
{
@@ -389,6 +410,7 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
struct cudbg_buffer dbg_buff = { 0 };
u32 size, min_size, total_size = 0;
struct cudbg_hdr *cudbg_hdr;
+ int rc;
size = *buf_size;
@@ -408,7 +430,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
cudbg_hdr->chip_ver = adap->params.chip;
cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
- cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
min_size = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) *
@@ -416,6 +437,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
if (size < min_size)
return -ENOMEM;
+ rc = cudbg_get_workspace_size();
+ if (rc) {
+ /* Zlib is available, so use zlib deflate */
+ cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+ rc = cudbg_alloc_compress_buff(&cudbg_init);
+ if (rc) {
+ /* Ignore error and continue without compression. */
+ dev_warn(adap->pdev_dev,
+ "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
+ rc);
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ rc = 0;
+ }
+ } else {
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ }
+
+ cudbg_hdr->compress_type = cudbg_init.compress_type;
dbg_buff.offset += min_size;
total_size = dbg_buff.offset;
@@ -433,8 +472,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
buf,
&total_size);
+ cudbg_free_compress_buff(&cudbg_init);
cudbg_hdr->data_len = total_size;
- *buf_size = total_size;
+ if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+ *buf_size = size;
+ else
+ *buf_size = total_size;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index 7ceeb0bc9fa8..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -23,6 +23,9 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 677a3ba83c1f..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
if (ftid >= t->nftids)
ftid = -1;
} else {
- ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ if (is_t6(adap->params.chip)) {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 1);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region
+ * unallocated
+ */
+ bitmap_release_region(t->ftid_bmap, ftid, 1);
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
- /* this is only a lookup, keep the found region unallocated */
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
}
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
return -EBUSY;
}
- if (family == PF_INET)
+ if (family == PF_INET) {
__set_bit(fidx, t->ftid_bmap);
- else
- bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
return 0;
}
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET)
+ if (family == PF_INET) {
__clear_bit(fidx, t->ftid_bmap);
- else
- bitmap_release_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
}
@@ -1249,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
}
} else { /* IPv6 */
- /* Ensure that the IPv6 filter is aligned on a
- * multiple of 4 boundary.
- */
- if (filter_id & 0x3) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
- return -EINVAL;
- }
+ if (chip_ver < CHELSIO_T6) {
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
- /* Check all except the base overlapping IPv4 filter slots. */
- for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ /* Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < filter_id + 4;
+ fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EBUSY;
+ }
+ }
+ } else {
+ /* On T6, with CLIP enabled, an IPv6 filter occupies
+ * 2 slots.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+ /* Check overlapping IPv4 filter slot */
+ fidx = filter_id + 1;
f = &adapter->tids.ftid_tab[fidx];
if (f->valid) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
- fidx);
- return -EINVAL;
+ pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+ __func__, fidx);
+ return -EBUSY;
}
}
}
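
The slot arithmetic behind the two branches above, restated as a hedged stand-alone check: pre-T6 chips burn 4 contiguous, 4-aligned slots per IPv6 filter, while T6 with CLIP enabled needs only 2 slots, 2-aligned (hence the filter_id & 0x3 versus filter_id & 0x1 tests):

#include <stdbool.h>

static int ipv6_filter_slots(bool is_t6)
{
	return is_t6 ? 2 : 4;
}

/* An IPv6 filter id must be aligned to the number of slots it occupies. */
static bool ipv6_filter_id_aligned(unsigned int id, bool is_t6)
{
	return (id & (unsigned int)(ipv6_filter_slots(is_t6) - 1)) == 0;
}
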
@@ -1279,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
fidx = filter_id + adapter->tids.ftid_base;
ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
if (ret)
return ret;
 /* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
if (ret) {
/* Clear the bits we have set above */
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
return ret;
}
@@ -1303,7 +1346,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
IPV6_ADDR_ANY) {
ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
if (ret) {
- cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6);
+ cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+ chip_ver);
return ret;
}
}
@@ -1333,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
ret = set_filter_wr(adapter, filter_id);
if (ret) {
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
clear_filter(adapter, f);
}
@@ -1411,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
struct filter_entry *f;
unsigned int max_fidx;
int ret;
@@ -1436,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
if (f->valid) {
f->ctx = ctx;
cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET);
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 87ac1e4dafc1..11fe5961040a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
* called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
#define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
+ {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
{ 0, } \
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
}
#ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
{
/* Turn carrier off since we don't have to transmit anything on this
* interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
}
/* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
- unsigned int i;
u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ unsigned int i, vf, nvfs;
+ u16 a, b;
int err;
u8 *na;
- u16 a, b;
+ adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+ PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
- if (!err) {
- na = adap->params.vpd.na;
- for (i = 0; i < ETH_ALEN; i++)
- hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
- hex2val(na[2 * i + 1]));
- a = (hw_addr[0] << 8) | hw_addr[1];
- b = (hw_addr[1] << 8) | hw_addr[2];
- a ^= b;
- a |= 0x0200; /* locally assigned Ethernet MAC address */
- a &= ~0x0100; /* not a multicast Ethernet MAC address */
- macaddr[0] = a >> 8;
- macaddr[1] = a & 0xff;
-
- for (i = 2; i < 5; i++)
- macaddr[i] = hw_addr[i + 1];
-
- for (i = 0; i < adap->num_vfs; i++) {
- macaddr[5] = adap->pf * 16 + i;
- ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
- }
+ if (err)
+ return;
+
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+ vf < nvfs; vf++) {
+ macaddr[5] = adap->pf * 16 + vf;
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
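
The station-MAC derivation above is plain arithmetic on the VPD "na" (network address) string, so it can be checked in isolation. A userspace rendition (sketch only, not part of the patch) with a hypothetical VPD value; nyb() stands in for the driver's hex2val():

#include <stdio.h>

static unsigned char nyb(char c)	/* hex nibble decode */
{
	return c >= 'a' ? c - 'a' + 10 : c >= 'A' ? c - 'A' + 10 : c - '0';
}

int main(void)
{
	const char *na = "0007432B1C2D";	/* hypothetical VPD "na" */
	unsigned char hw[6], mac[6];
	unsigned int a, b;
	int i;

	for (i = 0; i < 6; i++)
		hw[i] = nyb(na[2 * i]) * 16 + nyb(na[2 * i + 1]);

	a = (hw[0] << 8) | hw[1];
	b = (hw[1] << 8) | hw[2];
	a ^= b;
	a |= 0x0200;		/* locally assigned */
	a &= ~0x0100;		/* never multicast */
	mac[0] = a >> 8;
	mac[1] = a & 0xff;
	for (i = 2; i < 5; i++)
		mac[i] = hw[i + 1];
	mac[5] = 4 * 16 + 0;	/* pf = 4, vf = 0 */

	for (i = 0; i < 6; i++)
		printf("%02x%c", mac[i], i < 5 ? ':' : '\n');
	return 0;
}

With pf = 4 and vf = 0 this yields 06:44:2b:1c:2d:40; the last byte packs the PF and VF numbers so each VF gets a distinct station address.
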
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return ret;
}
-static int cxgb_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
return 0;
}
-static int cxgb_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct port_info *pi = netdev_priv(dev);
unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2987,6 +2995,151 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ /* For T6, firmware reserves the last 2 entries for
+ * storing the match-all mac filter (config file entry).
+ */
+ if (!adapter->rawf_cnt)
+ return;
+
+ /* Callback for adding vxlan port can be called with the same
+ * port for both IPv4 and IPv6. We should not disable the
+ * offloading when the same port for both protocols is added
+ * and later one of them is removed.
+ */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' mac filter entry for inner mac,
+ * if raw mac interface is supported. Once the linux kernel provides
+ * driver entry points for adding/deleting the inner mac addresses,
+ * we will remove this 'match all' entry and fallback to adding
+ * exact match filters.
+ */
+ if (adapter->rawf_cnt) {
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+ }
+}
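
The vxlan_port/vxlan_port_cnt pair above implements a one-slot refcount: the same port may be added twice (once each for the IPv4 and IPv6 listeners), a second distinct port is refused, and the type register is cleared only when the count drops to zero. A minimal userspace model of that bookkeeping (sketch only, not part of the patch):

#include <assert.h>
#include <stdint.h>

struct model { uint16_t vxlan_port; unsigned int vxlan_port_cnt; };

static int add(struct model *m, uint16_t port)
{
	if (m->vxlan_port_cnt && m->vxlan_port == port) {
		m->vxlan_port_cnt++;	/* same port, IPv4 and IPv6 */
		return 0;
	}
	if (m->vxlan_port_cnt)
		return -1;		/* only one offloaded port */
	m->vxlan_port = port;
	m->vxlan_port_cnt = 1;		/* would program MPS_RX_VXLAN_TYPE_A */
	return 0;
}

static void del(struct model *m, uint16_t port)
{
	if (!m->vxlan_port_cnt || m->vxlan_port != port)
		return;
	if (--m->vxlan_port_cnt == 0)
		m->vxlan_port = 0;	/* would clear MPS_RX_VXLAN_TYPE_A */
}

int main(void)
{
	struct model m = { 0, 0 };

	add(&m, 4789);			/* IPv4 listener */
	add(&m, 4789);			/* IPv6 listener, same port */
	assert(add(&m, 8472) < 0);	/* second port is refused */
	del(&m, 4789);
	assert(m.vxlan_port_cnt == 1);	/* offload survives one removal */
	del(&m, 4789);
	assert(m.vxlan_port == 0);
	return 0;
}
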
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,20 +3171,24 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = dummy_open,
- .ndo_set_vf_mac = cxgb_set_vf_mac,
- .ndo_get_vf_config = cxgb_get_vf_config,
- .ndo_set_vf_rate = cxgb_set_vf_rate,
- .ndo_get_phys_port_id = cxgb_get_phys_port_id,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
};
#endif
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
@@ -3043,7 +3200,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
- .get_drvinfo = get_drvinfo,
+ .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -4759,7 +4916,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
{
dev->type = ARPHRD_NONE;
dev->mtu = 0;
@@ -4775,38 +4932,6 @@ static void dummy_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
- struct adapter *adap = pci_get_drvdata(pdev);
- struct net_device *netdev;
- struct port_info *pi;
- char name[IFNAMSIZ];
- int err;
-
- snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
- dummy_setup);
- if (!netdev)
- return -ENOMEM;
-
- pi = netdev_priv(netdev);
- pi->adapter = adap;
- pi->tx_chan = adap->pf % adap->params.nports;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adap->port[0] = netdev;
- pi->port_id = 0;
-
- err = register_netdev(adap->port[0]);
- if (err) {
- pr_info("Unable to register VF mgmt netdev %s\n", name);
- free_netdev(adap->port[0]);
- adap->port[0] = NULL;
- return err;
- }
- return 0;
-}
-
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4943,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -4830,46 +4955,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
- num_vfs = current_vfs;
- return num_vfs;
+ return current_vfs;
}
-
- /* Disable SRIOV when zero is passed.
- * One needs to disable SRIOV before modifying it, else
- * stack throws the below warning:
- * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ /* Note that the upper-level code ensures that we're never called with
+ * a non-zero "num_vfs" when we already have VFs instantiated. But
+ * it never hurts to code defensively.
*/
+ if (num_vfs != 0 && current_vfs != 0)
+ return -EBUSY;
+
+ /* Nothing to do for no change. */
+ if (num_vfs == current_vfs)
+ return num_vfs;
+
+ /* Disable SRIOV when zero is passed. */
if (!num_vfs) {
pci_disable_sriov(pdev);
- if (adap->port[0]) {
- unregister_netdev(adap->port[0]);
- adap->port[0] = NULL;
- }
+ /* free VF Management Interface */
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+
/* free VF resources */
+ adap->num_vfs = 0;
kfree(adap->vfinfo);
adap->vfinfo = NULL;
- adap->num_vfs = 0;
- return num_vfs;
+ return 0;
}
- if (num_vfs != current_vfs) {
- err = pci_enable_sriov(pdev, num_vfs);
+ if (!current_vfs) {
+ struct fw_pfvf_cmd port_cmd, port_rpl;
+ struct net_device *netdev;
+ unsigned int pmask, port;
+ struct pci_dev *pbridge;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ u32 devcap2;
+ u16 flags;
+ int pos;
+
+ /* If we want to instantiate Virtual Functions, then our
+ * parent bridge's PCI-E needs to support Alternative Routing
+ * ID (ARI) because our VFs will show up at function offset 8
+ * and above.
+ */
+ pbridge = pdev->bus->self;
+ pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+ pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+ !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+ /* Our parent bridge does not support ARI so issue a
+ * warning and skip instantiating the VFs. They
+ * won't be reachable.
+ */
+ dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+ pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+ PCI_FUNC(pbridge->devfn));
+ return -ENOTSUPP;
+ }
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adap->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+ err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
if (err)
return err;
+ pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+ port = ffs(pmask) - 1;
+ /* Allocate VF Management Interface. */
+ snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+ adap->pf);
+ netdev = alloc_netdev(sizeof(struct port_info),
+ name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+ if (!netdev)
+ return -ENOMEM;
- adap->num_vfs = num_vfs;
- err = config_mgmt_dev(pdev);
- if (err)
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ pi->lport = port;
+ pi->tx_chan = port;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+ pi->port_id = 0;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
return err;
+ }
+ /* Allocate and set up VF Information. */
+ adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (!adap->vfinfo) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return -ENOMEM;
+ }
+ cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+ }
+ /* Instantiate the requested number of VFs. */
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ pr_info("Unable to instantiate %d VFs\n", num_vfs);
+ if (!current_vfs) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ }
+ return err;
}
- adap->vfinfo = kcalloc(adap->num_vfs,
- sizeof(struct vf_info), GFP_KERNEL);
- if (adap->vfinfo)
- fill_vf_station_mac_addr(adap);
+ adap->num_vfs = num_vfs;
return num_vfs;
}
-#endif
+#endif /* CONFIG_PCI_IOV */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -4882,9 +5093,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
- u32 v, port_vec;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4908,6 +5116,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+
+ adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
goto out_unmap_bar0;
@@ -4918,13 +5133,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chip = get_chip_type(pdev, pl_rev);
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+ pci_set_drvdata(pdev, adapter);
+
if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
- iounmap(regs);
-#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
+ return 0;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5164,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
-
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
adap_idx++;
-
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
-
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5210,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
- spin_lock_init(&adapter->mbox_lock);
-
- INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5285,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5273,58 +5482,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_fw_sge_queues(adapter);
return 0;
-sriov:
-#ifdef CONFIG_PCI_IOV
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto free_pci_region;
- }
-
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->regs = regs;
- adapter->adap_idx = adap_idx;
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto free_adapter;
- }
- spin_lock_init(&adapter->mbox_lock);
- INIT_LIST_HEAD(&adapter->mlist.list);
-
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
- &v, &port_vec);
- if (err < 0) {
- dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_mbox_log;
- }
-
- adapter->params.nports = hweight32(port_vec);
- pci_set_drvdata(pdev, adapter);
- return 0;
-
-free_mbox_log:
- kfree(adapter->mbox_log);
- free_adapter:
- kfree(adapter);
- free_pci_region:
- iounmap(regs);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- return err;
-#else
- return 0;
-#endif
-
out_free_dev:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
@@ -5416,14 +5573,7 @@ static void remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PCI_IOV
else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
+ cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
}
@@ -5467,18 +5617,6 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
-#ifdef CONFIG_PCI_IOV
- else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- }
-#endif
}
static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f937789..eab781fab2a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -770,12 +770,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -788,10 +795,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -810,13 +818,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
@@ -827,9 +842,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
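
The flit arithmetic above is easy to check offline. One flit is 8 bytes and one Tx descriptor holds 8 flits; the header sizes in this sketch (not part of the patch) are assumptions taken from the message layouts: 16 bytes each for fw_eth_tx_pkt_wr, cpl_tx_pkt_core and cpl_tx_pkt_lso_core, and 32 bytes for the cpl_tx_tnl_lso structure added later in this patch.

#include <stdio.h>

#define FLIT 8	/* one flit is 8 bytes; one Tx descriptor is 8 flits */

static unsigned int sgl_len(unsigned int n)	/* mirrors sge.c */
{
	n--;				/* first address rides in the WR */
	return (3 * n) / 2 + (n & 1) + 2;
}

static unsigned int flits_to_desc(unsigned int n)
{
	return (n + 7) / 8;		/* DIV_ROUND_UP(n, 8) */
}

int main(void)
{
	unsigned int nfrags = 3;	/* skb head + 3 page fragments */
	unsigned int flits, hdrlen;

	/* tunnel (T6) GSO: WR + cpl_tx_tnl_lso + cpl_tx_pkt_core */
	hdrlen = 16 + 32 + 16;
	flits = sgl_len(nfrags + 1) + hdrlen / FLIT;
	printf("tunnel LSO: %u flits -> %u descriptors\n",
	       flits, flits_to_desc(flits));

	/* plain GSO: WR + cpl_tx_pkt_lso_core + cpl_tx_pkt_core */
	hdrlen = 16 + 16 + 16;
	flits = sgl_len(nfrags + 1) + hdrlen / FLIT;
	printf("plain LSO:  %u flits -> %u descriptors\n",
	       flits, flits_to_desc(flits));
	return 0;
}
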
/**
@@ -1154,6 +1170,102 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only for T5 and onwards.
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
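
The lengths t6_fill_tnl_lso() encodes are all derived from skb header offsets. Worked below (sketch only, not part of the patch) for a canonical untagged IPv4/UDP/VXLAN frame with textbook header sizes, assumed rather than read from a live skb:

#include <stdio.h>

int main(void)
{
	int eth_hlen = 14, outer_ip = 20, udp = 8, vxlan = 8;
	int inner_eth = 14, inner_ip = 20, inner_tcp = 20;

	/* skb_network_offset() - ETH_HLEN: extra outer L2 bytes (VLANs) */
	int eth_xtra_len = eth_hlen - 14;
	/* skb_network_header_len(): outer IP header */
	int l3hdr_len = outer_ip;
	/* skb_inner_mac_header() - skb_mac_header(): whole tunnel header */
	int tnl_hdr_len = eth_hlen + outer_ip + udp + vxlan;
	/* extra inner L2 bytes beyond a bare Ethernet header */
	int in_eth_xtra_len = inner_eth - 14;

	printf("ETHHDRLENOUT=%d IPHDRLENOUT=%d (4-byte words)\n",
	       eth_xtra_len / 4, l3hdr_len / 4);
	printf("TNLHDRLEN=%d bytes\n", tnl_hdr_len);
	printf("inner: ETHHDRLEN=%d IPHDRLEN=%d TCPHDRLEN=%d words\n",
	       in_eth_xtra_len / 4, inner_ip / 4, inner_tcp / 4);
	return 0;
}
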
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1177,6 +1289,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1227,7 +1342,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,9 +1357,12 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
@@ -1270,33 +1389,58 @@ out_free: dev_kfree_skb_any(skb);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* Driver is expected to compute partial checksum that
+ * does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
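
Before the t4_hw.c changes, a note on the inner-IP fixup in the hunk above: the driver zeroes tot_len and check, then recomputes the header checksum so hardware can patch the per-segment length in and adjust the checksum incrementally. A portable stand-in (sketch only, not part of the patch) for the one's-complement header sum that ip_fast_csum() computes:

#include <stdio.h>
#include <stdint.h>

static uint16_t ip_hdr_csum(const uint8_t *p, int words32)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < words32 * 4; i += 2)
		sum += (p[i] << 8) | p[i + 1];	/* big-endian 16-bit words */
	while (sum >> 16)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* minimal 20-byte IPv4 header, tot_len and check already zeroed */
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x00,	/* ver/ihl, tos, tot_len = 0 */
		0x12, 0x34, 0x40, 0x00,	/* id, flags/frag */
		0x40, 0x06, 0x00, 0x00,	/* ttl, proto = TCP, check = 0 */
		0x0a, 0x00, 0x00, 0x01,	/* saddr 10.0.0.1 */
		0x0a, 0x00, 0x00, 0x02,	/* daddr 10.0.0.2 */
	};

	printf("partial header checksum: 0x%04x\n", ip_hdr_csum(iph, 5));
	return 0;
}
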
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 0e9f64a46ac5..6d76851a4da9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7467,6 +7467,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Specify that this is an inner mac address */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
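
In fw_vi_mac_raw the MAC and its mask occupy the last six bytes of the 8-byte data1[]/data1m[] pairs, which is what the "+ 2" offset in the memcpy() calls above accounts for. A small demonstration (sketch only, not part of the patch; byte-order conversion omitted). Note that an all-zero address with an all-zero mask, as cxgb_add_udp_tunnel() passes, makes the entry match every inner MAC:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t data1[2] = { 0, 0 };	/* 8 bytes on the wire */
	uint8_t mac[6] = { 0x06, 0x44, 0x2b, 0x1c, 0x2d, 0x40 };
	const uint8_t *p = (const uint8_t *)data1;
	int i;

	/* same placement as the driver: skip 2 pad bytes, then the MAC */
	memcpy((uint8_t *)&data1[0] + 2, mac, 6);

	for (i = 0; i < 8; i++)
		printf("%02x%c", p[i], i < 7 ? ' ' : '\n');
	return 0;
}
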
+
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
+
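All of the field accessors above follow one _S/_M/_V/_G pattern: shift amount, mask, insert, extract. A self-checking round-trip (sketch only, not part of the patch) through the UdpLenSetOut_to_TnlHdrLen halfword, composed exactly as t6_fill_tnl_lso() does, with the htons() step omitted:

#include <assert.h>
#include <stdio.h>

#define CPL_TX_TNL_LSO_UDPLENSETOUT_S	15
#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
	((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
#define CPL_TX_TNL_LSO_UDPLENSETOUT_F	CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)

#define CPL_TX_TNL_LSO_TNLTYPE_S	12
#define CPL_TX_TNL_LSO_TNLTYPE_M	0x3
#define CPL_TX_TNL_LSO_TNLTYPE_V(x)	((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
#define CPL_TX_TNL_LSO_TNLTYPE_G(x)	\
	(((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)

#define CPL_TX_TNL_LSO_TNLHDRLEN_S	0
#define CPL_TX_TNL_LSO_TNLHDRLEN_M	0xfff
#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x)	\
	(((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)

int main(void)
{
	unsigned int tnl_type = 2;	/* TX_TNL_TYPE_VXLAN */
	unsigned int tnl_hdr_len = 50;	/* eth + ip + udp + vxlan */
	unsigned int field;

	field = CPL_TX_TNL_LSO_UDPLENSETOUT_F |
		CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type) |
		CPL_TX_TNL_LSO_TNLHDRLEN_V(tnl_hdr_len);

	assert(CPL_TX_TNL_LSO_TNLTYPE_G(field) == tnl_type);
	assert(CPL_TX_TNL_LSO_TNLHDRLEN_G(field) == tnl_hdr_len);
	printf("UdpLenSetOut_to_TnlHdrLen = 0x%04x\n", field);	/* 0xa032 */
	return 0;
}
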
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 863bc29153d9..d9c06d6dc7b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -2511,6 +2511,17 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2537,8 +2548,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 427f252a9087..f3310d5b3c4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2060,6 +2060,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2076,6 +2077,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2087,6 +2095,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2096,6 +2111,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2122,6 +2143,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..129b914a434c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
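
The translation above, worked through with assumed values (sketch only, not part of the patch): with a PAGE_SIZE of 4096 and firmware-provided buffer sizes of 4096 and 65536, the checks pass and fl_pg_order comes out as ilog2(65536) - 12 = 4.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, page_shift = 12;	/* assumed */
	unsigned int fl_small_pg = 4096, fl_large_pg = 65536;
	unsigned int order = 0, fl_pg_order = 0, v;

	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;	/* large buffer pulls no weight */

	if (fl_small_pg != page_size ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		printf("bad SGE FL buffer sizes [%u, %u]\n",
		       fl_small_pg, fl_large_pg);
		return 1;
	}

	if (fl_large_pg) {
		for (v = fl_large_pg; v > 1; v >>= 1)	/* ilog2() */
			order++;
		fl_pg_order = order - page_shift;
	}
	printf("fl_pg_order = %u (large FL buffers are %u-page compounds)\n",
	       fl_pg_order, 1u << fl_pg_order);
	return 0;
}
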
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <[email protected]>");
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..0df743ea51f1
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+# Cortina ethernet devices
+
+config NET_VENDOR_CORTINA
+ bool "Cortina Gemini devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_CORTINA
+
+config GEMINI_ETHERNET
+ tristate "Gemini Gigabit Ethernet support"
+ depends on OF
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
+
+endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Cortina Gemini network device drivers.
+
+obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Ethernet device driver for Cortina Systems Gemini SoC
+ * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
+ * Net Engine and Gigabit Ethernet MAC (GMAC)
+ * This hardware contains a TCP Offload Engine (TOE) but currently the
+ * driver does not make use of it.
+ *
+ * Authors:
+ * Linus Walleij <[email protected]>
+ * Tobias Waldvogel <[email protected]> (OpenWRT)
+ * Michał Mirosław <[email protected]>
+ * Paulius Zaleckas <[email protected]>
+ * Giuseppe De Robertis <[email protected]>
+ * Gary Chen & Ch Hsu Storlink Semiconductor
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include "gemini.h"
+
+#define DRV_NAME "gmac-gemini"
+#define DRV_VERSION "1.0"
+
+#define HSIZE_8 0x00
+#define HSIZE_16 0x01
+#define HSIZE_32 0x02
+
+#define HBURST_SINGLE 0x00
+#define HBURST_INCR 0x01
+#define HBURST_INCR4 0x02
+#define HBURST_INCR8 0x03
+
+#define HPROT_DATA_CACHE BIT(0)
+#define HPROT_PRIVILIGED BIT(1)
+#define HPROT_BUFFERABLE BIT(2)
+#define HPROT_CACHABLE BIT(3)
+
+#define DEFAULT_RX_COALESCE_NSECS 0
+#define DEFAULT_GMAC_RXQ_ORDER 9
+#define DEFAULT_GMAC_TXQ_ORDER 8
+#define DEFAULT_RX_BUF_ORDER 11
+#define DEFAULT_NAPI_WEIGHT 64
+#define TX_MAX_FRAGS 16
+#define TX_QUEUE_NUM 1 /* max: 6 */
+#define RX_MAX_ALLOC_ORDER 2
+
+#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
+ GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
+#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
+ GMAC0_SWTQ00_FIN_INT_BIT)
+#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+/**
+ * struct gmac_queue_page - page buffer per-page info
+ */
+struct gmac_queue_page {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct gmac_txq {
+ struct gmac_txdesc *ring;
+ struct sk_buff **skb;
+ unsigned int cptr;
+ unsigned int noirq_packets;
+};
+
+struct gemini_ethernet;
+
+struct gemini_ethernet_port {
+ u8 id; /* 0 or 1 */
+
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct device *dev;
+ void __iomem *dma_base;
+ void __iomem *gmac_base;
+ struct clk *pclk;
+ struct reset_control *reset;
+ int irq;
+ __le32 mac_addr[3];
+
+ void __iomem *rxq_rwptr;
+ struct gmac_rxdesc *rxq_ring;
+ unsigned int rxq_order;
+
+ struct napi_struct napi;
+ struct hrtimer rx_coalesce_timer;
+ unsigned int rx_coalesce_nsecs;
+ unsigned int freeq_refill;
+ struct gmac_txq txq[TX_QUEUE_NUM];
+ unsigned int txq_order;
+ unsigned int irq_every_tx_packets;
+
+ dma_addr_t rxq_dma_base;
+ dma_addr_t txq_dma_base;
+
+ unsigned int msg_enable;
+ spinlock_t config_lock; /* Locks config register */
+
+ struct u64_stats_sync tx_stats_syncp;
+ struct u64_stats_sync rx_stats_syncp;
+ struct u64_stats_sync ir_stats_syncp;
+
+ struct rtnl_link_stats64 stats;
+ u64 hw_stats[RX_STATS_NUM];
+ u64 rx_stats[RX_STATUS_NUM];
+ u64 rx_csum_stats[RX_CHKSUM_NUM];
+ u64 rx_napi_exits;
+ u64 tx_frag_stats[TX_MAX_FRAGS];
+ u64 tx_frags_linearized;
+ u64 tx_hw_csummed;
+};
+
+struct gemini_ethernet {
+ struct device *dev;
+ void __iomem *base;
+ struct gemini_ethernet_port *port0;
+ struct gemini_ethernet_port *port1;
+
+ spinlock_t irq_lock; /* Locks IRQ-related registers */
+ unsigned int freeq_order;
+ unsigned int freeq_frag_order;
+ struct gmac_rxdesc *freeq_ring;
+ dma_addr_t freeq_dma_base;
+ struct gmac_queue_page *freeq_pages;
+ unsigned int num_freeq_pages;
+ spinlock_t freeq_lock; /* Locks queue from reentrance */
+};
+
+#define GMAC_STATS_NUM ( \
+ RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
+ TX_MAX_FRAGS + 2)
+
+static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
+ "GMAC_IN_DISCARDS",
+ "GMAC_IN_ERRORS",
+ "GMAC_IN_MCAST",
+ "GMAC_IN_BCAST",
+ "GMAC_IN_MAC1",
+ "GMAC_IN_MAC2",
+ "RX_STATUS_GOOD_FRAME",
+ "RX_STATUS_TOO_LONG_GOOD_CRC",
+ "RX_STATUS_RUNT_FRAME",
+ "RX_STATUS_SFD_NOT_FOUND",
+ "RX_STATUS_CRC_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_CRC",
+ "RX_STATUS_ALIGNMENT_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_ALIGN",
+ "RX_STATUS_RX_ERR",
+ "RX_STATUS_DA_FILTERED",
+ "RX_STATUS_BUFFER_FULL",
+ "RX_STATUS_11",
+ "RX_STATUS_12",
+ "RX_STATUS_13",
+ "RX_STATUS_14",
+ "RX_STATUS_15",
+ "RX_CHKSUM_IP_UDP_TCP_OK",
+ "RX_CHKSUM_IP_OK_ONLY",
+ "RX_CHKSUM_NONE",
+ "RX_CHKSUM_3",
+ "RX_CHKSUM_IP_ERR_UNKNOWN",
+ "RX_CHKSUM_IP_ERR",
+ "RX_CHKSUM_TCP_UDP_ERR",
+ "RX_CHKSUM_7",
+ "RX_NAPI_EXITS",
+ "TX_FRAGS[1]",
+ "TX_FRAGS[2]",
+ "TX_FRAGS[3]",
+ "TX_FRAGS[4]",
+ "TX_FRAGS[5]",
+ "TX_FRAGS[6]",
+ "TX_FRAGS[7]",
+ "TX_FRAGS[8]",
+ "TX_FRAGS[9]",
+ "TX_FRAGS[10]",
+ "TX_FRAGS[11]",
+ "TX_FRAGS[12]",
+ "TX_FRAGS[13]",
+ "TX_FRAGS[14]",
+ "TX_FRAGS[15]",
+ "TX_FRAGS[16+]",
+ "TX_FRAGS_LINEARIZED",
+ "TX_HW_CSUMMED",
+};
+
+static void gmac_dump_dma_state(struct net_device *netdev);
+
+static void gmac_update_config0_reg(struct net_device *netdev,
+ u32 val, u32 vmask)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = (reg & ~vmask) | val;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_enable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg &= ~CONFIG0_TX_RX_DISABLE;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_disable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val |= CONFIG0_TX_RX_DISABLE;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+
+ mdelay(10); /* let GMAC consume packet */
+}
+
+static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val &= ~CONFIG0_FLOW_CTL;
+ if (tx)
+ val |= CONFIG0_FLOW_TX;
+ if (rx)
+ val |= CONFIG0_FLOW_RX;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_speed_set(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ union gmac_status status, old_status;
+ int pause_tx = 0;
+ int pause_rx = 0;
+
+ status.bits32 = readl(port->gmac_base + GMAC_STATUS);
+ old_status.bits32 = status.bits32;
+ status.bits.link = phydev->link;
+ status.bits.duplex = phydev->duplex;
+
+ switch (phydev->speed) {
+ case 1000:
+ status.bits.speed = GMAC_SPEED_1000;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
+ netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ break;
+ case 100:
+ status.bits.speed = GMAC_SPEED_100;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ break;
+ case 10:
+ status.bits.speed = GMAC_SPEED_10;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ break;
+ default:
+ netdev_warn(netdev, "Not supported PHY speed (%d)\n",
+ phydev->speed);
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phydev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phydev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause_rx = 1;
+ if (cap & FLOW_CTRL_TX)
+ pause_tx = 1;
+ }
+
+ gmac_set_flow_control(netdev, pause_tx, pause_rx);
+
+ if (old_status.bits32 == status.bits32)
+ return;
+
+ if (netif_msg_link(port)) {
+ phy_print_status(phydev);
+ netdev_info(netdev, "link flow control: %s\n",
+ phydev->pause
+ ? (phydev->asym_pause ? "tx" : "both")
+ : (phydev->asym_pause ? "rx" : "none")
+ );
+ }
+
+ gmac_disable_tx_rx(netdev);
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ gmac_enable_tx_rx(netdev);
+}
+
+static int gmac_setup_phy(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_status status = { .bits32 = 0 };
+ struct device *dev = port->dev;
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(netdev,
+ dev->of_node,
+ gmac_speed_set);
+ if (!phy)
+ return -ENODEV;
+ netdev->phydev = phy;
+
+ netdev_info(netdev, "connected to PHY \"%s\"\n",
+ phydev_name(phy));
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ phy->supported &= PHY_GBIT_FEATURES;
+ phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+ phy->advertising = phy->supported;
+
+ /* set PHY interface type */
+ switch (phy->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ netdev_info(netdev, "set GMAC0 to MII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_MII;
+ netdev_info(netdev, "connect to MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_GMII;
+ netdev_info(netdev, "connect to GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ netdev_info(netdev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII\n");
+ break;
+ default:
+ netdev_err(netdev, "Unsupported MII interface\n");
+ phy_disconnect(phy);
+ netdev->phydev = NULL;
+ return -EINVAL;
+ }
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+
+ return 0;
+}
+
+static int gmac_pick_rx_max_len(int max_l3_len)
+{
+ /* index = CONFIG_MAXLEN_XXX values */
+ static const int max_len[8] = {
+ 1536, 1518, 1522, 1542,
+ 9212, 10236, 1518, 1518
+ };
+ int i, n = 5;
+
+ max_l3_len += ETH_HLEN + VLAN_HLEN;
+
+ if (max_l3_len > max_len[n])
+ return -1;
+
+ for (i = 0; i < 5; i++) {
+ if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
+ n = i;
+ }
+
+ return n;
+}
+
+static int gmac_init(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0 = { .bits = {
+ .dis_tx = 1,
+ .dis_rx = 1,
+ .ipv4_rx_chksum = 1,
+ .ipv6_rx_chksum = 1,
+ .rx_err_detect = 1,
+ .rgmm_edge = 1,
+ .port0_chk_hwq = 1,
+ .port1_chk_hwq = 1,
+ .port0_chk_toeq = 1,
+ .port1_chk_toeq = 1,
+ .port0_chk_classq = 1,
+ .port1_chk_classq = 1,
+ } };
+ union gmac_ahb_weight ahb_weight = { .bits = {
+ .rx_weight = 1,
+ .tx_weight = 1,
+ .hash_weight = 1,
+ .pre_req = 0x1f,
+ .tq_dv_threshold = 0,
+ } };
+ union gmac_tx_wcr0 hw_weight = { .bits = {
+ .hw_tq3 = 1,
+ .hw_tq2 = 1,
+ .hw_tq1 = 1,
+ .hw_tq0 = 1,
+ } };
+ union gmac_tx_wcr1 sw_weight = { .bits = {
+ .sw_tq5 = 1,
+ .sw_tq4 = 1,
+ .sw_tq3 = 1,
+ .sw_tq2 = 1,
+ .sw_tq1 = 1,
+ .sw_tq0 = 1,
+ } };
+ union gmac_config1 config1 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 24,
+ } };
+ union gmac_config2 config2 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 32,
+ } };
+ union gmac_config3 config3 = { .bits = {
+ .set_threshold = 0,
+ .rel_threshold = 0,
+ } };
+ union gmac_config0 tmp;
+
+ config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
+ tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ config0.bits.reserved = tmp.bits.reserved;
+ writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
+ writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
+ writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
+ writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
+
+ writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
+
+ writel(hw_weight.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
+ writel(sw_weight.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
+
+ port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
+ port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
+ port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
+
+ /* Request a TX completion interrupt for every quarter of the
+ * queue so that a stopped queue can be woken up in time
+ */
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+
+ return 0;
+}
+
+static void gmac_uninit(struct net_device *netdev)
+{
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
+}
+
+static int gmac_setup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ size_t entries = 1 << port->txq_order;
+ struct gmac_txq *txq = port->txq;
+ struct gmac_txdesc *desc_ring;
+ size_t len = n_txq * entries;
+ struct sk_buff **skb_tab;
+ void __iomem *rwptr_reg;
+ unsigned int r;
+ int i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
+ if (!skb_tab)
+ return -ENOMEM;
+
+ desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
+ &port->txq_dma_base, GFP_KERNEL);
+
+ if (!desc_ring) {
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+ if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "TX queue base is not aligned\n");
+ dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+ desc_ring, port->txq_dma_base);
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+ writel(port->txq_dma_base | port->txq_order,
+ port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ for (i = 0; i < n_txq; i++) {
+ txq->ring = desc_ring;
+ txq->skb = skb_tab;
+ txq->noirq_packets = 0;
+
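+ /* Each queue's rwptr register holds the hardware read pointer
+ * in the low halfword and the software write pointer in the
+ * high halfword: start each queue out empty by writing the
+ * current read pointer back as the write pointer.
+ */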
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+ txq->cptr = r;
+
+ txq++;
+ desc_ring += entries;
+ skb_tab += entries;
+ }
+
+ return 0;
+}
+
+static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
+ unsigned int r)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int m = (1 << port->txq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int c = txq->cptr;
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ unsigned int hwchksum = 0;
+ unsigned long bytes = 0;
+ struct gmac_txdesc *txd;
+ unsigned short nfrags;
+ unsigned int errs = 0;
+ unsigned int pkts = 0;
+ unsigned int word3;
+ dma_addr_t mapping;
+
+ if (c == r)
+ return;
+
+ while (c != r) {
+ txd = txq->ring + c;
+ word0 = txd->word0;
+ word1 = txd->word1;
+ mapping = txd->word2.buf_adr;
+ word3 = txd->word3.bits32;
+
+ dma_unmap_single(geth->dev, mapping,
+ word0.bits.buffer_size, DMA_TO_DEVICE);
+
+ if (word3 & EOF_BIT)
+ dev_kfree_skb(txq->skb[c]);
+
+ c++;
+ c &= m;
+
+ if (!(word3 & SOF_BIT))
+ continue;
+
+ if (!word0.bits.status_tx_ok) {
+ errs++;
+ continue;
+ }
+
+ pkts++;
+ bytes += txd->word1.bits.byte_count;
+
+ if (word1.bits32 & TSS_CHECKUM_ENABLE)
+ hwchksum++;
+
+ nfrags = word0.bits.desc_count - 1;
+ if (nfrags) {
+ if (nfrags >= TX_MAX_FRAGS)
+ nfrags = TX_MAX_FRAGS - 1;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frag_stats[nfrags]++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ }
+ }
+
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ port->stats.tx_errors += errs;
+ port->stats.tx_packets += pkts;
+ port->stats.tx_bytes += bytes;
+ port->tx_hw_csummed += hwchksum;
+ u64_stats_update_end(&port->ir_stats_syncp);
+
+ txq->cptr = c;
+}
+
+static void gmac_cleanup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *rwptr_reg;
+ unsigned int r, i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ for (i = 0; i < n_txq; i++) {
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+
+ gmac_clean_txq(netdev, port->txq + i, r);
+ }
+ writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ kfree(port->txq->skb);
+ dma_free_coherent(geth->dev,
+ n_txq * sizeof(*port->txq->ring) << port->txq_order,
+ port->txq->ring, port->txq_dma_base);
+}
+
+static int gmac_setup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct nontoe_qhdr __iomem *qhdr;
+
+ qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ port->rxq_rwptr = &qhdr->word1;
+
+ /* Remap a slew of memory to use for the RX queue */
+ port->rxq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ &port->rxq_dma_base, GFP_KERNEL);
+ if (!port->rxq_ring)
+ return -ENOMEM;
+ if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
+ dev_warn(geth->dev, "RX queue base is not aligned\n");
+ dma_free_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+ return -ENOMEM;
+ }
+
+ writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
+ writel(0, port->rxq_rwptr);
+ return 0;
+}
+
+static struct gmac_queue_page *
+gmac_get_queue_page(struct gemini_ethernet *geth,
+ struct gemini_ethernet_port *port,
+ dma_addr_t addr)
+{
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+ int i;
+
+ /* Only look for even pages */
+ mapping = addr & PAGE_MASK;
+
+ if (!geth->freeq_pages) {
+ dev_err(geth->dev, "try to get page with no page list\n");
+ return NULL;
+ }
+
+ /* Look up a ring buffer page from virtual mapping */
+ for (i = 0; i < geth->num_freeq_pages; i++) {
+ gpage = &geth->freeq_pages[i];
+ if (gpage->mapping == mapping)
+ return gpage;
+ }
+
+ return NULL;
+}
+
+static void gmac_cleanup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct gmac_rxdesc *rxd = port->rxq_ring;
+ struct gmac_queue_page *gpage;
+ struct nontoe_qhdr __iomem *qhdr;
+ void __iomem *dma_reg;
+ void __iomem *ptr_reg;
+ dma_addr_t mapping;
+ union dma_rwptr rw;
+ unsigned int r, w;
+
+ qhdr = geth->base +
+ TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ dma_reg = &qhdr->word0;
+ ptr_reg = &qhdr->word1;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
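+ /* Halt the queue: pull the write pointer back to the read
+ * pointer, then clear the base register so the hardware no
+ * longer owns the ring.
+ */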
+ writew(r, ptr_reg + 2);
+
+ writel(0, dma_reg);
+
+ /* Loop from read pointer to write pointer of the RX queue
+ * and free up all pages by the queue.
+ */
+ while (r != w) {
+ mapping = rxd[r].word2.buf_adr;
+ r++;
+ r &= ((1 << port->rxq_order) - 1);
+
+ if (!mapping)
+ continue;
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find page\n");
+ continue;
+ }
+ /* Release the RX queue reference to the page */
+ put_page(gpage->page);
+ }
+
+ dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+}
+
+static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
+ int pn)
+{
+ struct gmac_rxdesc *freeq_entry;
+ struct gmac_queue_page *gpage;
+ unsigned int fpp_order;
+ unsigned int frag_len;
+ dma_addr_t mapping;
+ struct page *page;
+ int i;
+
+ /* First allocate and DMA map a single page */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ mapping = dma_map_single(geth->dev, page_address(page),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping)) {
+ put_page(page);
+ return NULL;
+ }
+
+ /* Then assign the page mapping (physical address) to the buffer
+ * address in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is
+ * 4096 bytes, 4k), and the default RX frag order is 11 (fragments
+ * are up to 2048 bytes, 2k) so fpp_order (fragments per page order)
+ * is default 1. Thus each page normally needs two entries in the
+ * queue.
+ */
+ frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
+ fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ freeq_entry = geth->freeq_ring + (pn << fpp_order);
+ dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
+ pn, frag_len, (1 << fpp_order), freeq_entry);
+ for (i = (1 << fpp_order); i > 0; i--) {
+ freeq_entry->word2.buf_adr = mapping;
+ freeq_entry++;
+ mapping += frag_len;
+ }
+
+ /* If the freeq entry already has a page mapped, then unmap it. */
+ gpage = &geth->freeq_pages[pn];
+ if (gpage->page) {
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ /* This should be the last reference to the page so it gets
+ * released
+ */
+ put_page(gpage->page);
+ }
+
+ /* Then put our new mapping into the page table */
+ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+ pn, (unsigned int)mapping, page);
+ gpage->mapping = mapping;
+ gpage->page = page;
+
+ return page;
+}
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: when true, only top up the queue starting at the current
+ * write pointer (the refill interrupt normally fires when the queue
+ * is half empty); when false, reset the queue by filling in every
+ * entry starting at the read pointer.
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int count = 0;
+ unsigned int pn, epn;
+ unsigned long flags;
+ union dma_rwptr rw;
+ unsigned int m_pn;
+
+ /* Mask for page */
+ m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
+
+ spin_lock_irqsave(&geth->freeq_lock, flags);
+
+ rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+ epn = (rw.bits.rptr >> fpp_order) - 1;
+ epn &= m_pn;
+
+ /* Loop over the freeq ring buffer entries */
+ while (pn != epn) {
+ struct gmac_queue_page *gpage;
+ struct page *page;
+
+ gpage = &geth->freeq_pages[pn];
+ page = gpage->page;
+
+ dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+ pn, page_ref_count(page), 1 << fpp_order);
+
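+ /* A reference count above 1 means some fragment of this page
+ * is still in flight up the stack: swap in a fresh page for
+ * this entry, unless a comfortable margin (64 fragments) is
+ * still left to fill.
+ */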
+ if (page_ref_count(page) > 1) {
+ unsigned int fl = (pn - epn) & m_pn;
+
+ if (fl > 64 >> fpp_order)
+ break;
+
+ page = geth_freeq_alloc_map_page(geth, pn);
+ if (!page)
+ break;
+ }
+
+ /* Add one reference per fragment in the page */
+ page_ref_add(page, 1 << fpp_order);
+ count += 1 << fpp_order;
+ pn++;
+ pn &= m_pn;
+ }
+
+ writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+ spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+ return count;
+}
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ union queue_threshold qt;
+ union dma_skb_size skbsz;
+ unsigned int filled;
+ unsigned int pn;
+
+ geth->freeq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ &geth->freeq_dma_base, GFP_KERNEL);
+ if (!geth->freeq_ring)
+ return -ENOMEM;
+ if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "queue ring base it not aligned\n");
+ goto err_freeq;
+ }
+
+ /* Allocate a mapping to page look-up index */
+ geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages),
+ GFP_KERNEL);
+ if (!geth->freeq_pages)
+ goto err_freeq;
+ geth->num_freeq_pages = pages;
+
+ dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+ for (pn = 0; pn < pages; pn++)
+ if (!geth_freeq_alloc_map_page(geth, pn))
+ goto err_freeq_alloc;
+
+ filled = geth_fill_freeq(geth, false);
+ if (!filled)
+ goto err_freeq_alloc;
+
+ qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+ qt.bits.swfq_empty = 32;
+ writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+ skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+ writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
+ writel(geth->freeq_dma_base | geth->freeq_order,
+ geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ return 0;
+
+err_freeq_alloc:
+ while (pn > 0) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ --pn;
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ gpage = &geth->freeq_pages[pn];
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+err_freeq:
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+ geth->freeq_ring = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
+ * @geth: the Gemini global ethernet state
+ */
+static void geth_cleanup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ unsigned int pn;
+
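+ /* Stop the hardware from using the queue: make the write
+ * pointer catch up with the read pointer, then clear the
+ * base/size register.
+ */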
+ writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
+ geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ for (pn = 0; pn < pages; pn++) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+
+ gpage = &geth->freeq_pages[pn];
+ while (page_ref_count(gpage->page) > 0)
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+}
+
+/**
+ * geth_resize_freeq() - resize the software queue depth
+ * @port: the port requesting the change
+ *
+ * This gets called at least once during probe() so the device queue gets
+ * "resized" from the hardware defaults. Since both ports/net devices share
+ * the same hardware queue, some synchronization between the ports is
+ * needed.
+ */
+static int geth_resize_freeq(struct gemini_ethernet_port *port)
+{
+ struct gemini_ethernet *geth = port->geth;
+ struct net_device *netdev = port->netdev;
+ struct gemini_ethernet_port *other_port;
+ struct net_device *other_netdev;
+ unsigned int new_size = 0;
+ unsigned int new_order;
+ unsigned long flags;
+ u32 en;
+ int ret;
+
+ if (netdev->dev_id == 0)
+ other_netdev = geth->port1->netdev;
+ else
+ other_netdev = geth->port0->netdev;
+
+ if (other_netdev && netif_running(other_netdev))
+ return -EBUSY;
+
+ new_size = 1 << (port->rxq_order + 1);
+ netdev_dbg(netdev, "port %d size: %d order %d\n",
+ netdev->dev_id,
+ new_size,
+ port->rxq_order);
+ if (other_netdev) {
+ other_port = netdev_priv(other_netdev);
+ new_size += 1 << (other_port->rxq_order + 1);
+ netdev_dbg(other_netdev, "port %d size: %d order %d\n",
+ other_netdev->dev_id,
+ (1 << (other_port->rxq_order + 1)),
+ other_port->rxq_order);
+ }
+
+ new_order = min(15, ilog2(new_size - 1) + 1);
+ dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
+ new_size, new_order);
+ if (geth->freeq_order == new_order)
+ return 0;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ /* Disable the software queue IRQs */
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ en &= ~SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ /* Drop the old queue */
+ if (geth->freeq_ring)
+ geth_cleanup_freeq(geth);
+
+ /* Allocate a new queue with the desired order */
+ geth->freeq_order = new_order;
+ ret = geth_setup_freeq(geth);
+
+ /* Restart the interrupts - NOTE if this is the first resize
+ * after probe(), this is where the interrupts get turned on
+ * in the first place.
+ */
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ en |= SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return ret;
+}
+
+static void gmac_tx_irq_enable(struct net_device *netdev,
+ unsigned int txq, int en)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
+ mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+ if (en)
+ writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = en ? val | mask : val & ~mask;
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+}
+
+static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+{
+ struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);
+
+ gmac_tx_irq_enable(netdev, txq_num, 0);
+ netif_tx_wake_queue(ntxq);
+}
+
+static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ struct gmac_txq *txq, unsigned short *desc)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct skb_shared_info *skb_si = skb_shinfo(skb);
+ unsigned short m = (1 << port->txq_order) - 1;
+ short frag, last_frag = skb_si->nr_frags - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int word1, word3, buflen;
+ unsigned short w = *desc;
+ struct gmac_txdesc *txd;
+ skb_frag_t *skb_frag;
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+ if (skb->protocol == htons(ETH_P_8021Q))
+ mtu += VLAN_HLEN;
+
+ word1 = skb->len;
+ word3 = SOF_BIT;
+
+ if (word1 > mtu) {
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mtu;
+ }
+
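+ /* Ask the TSS engine to insert the IPv4 header checksum and/or
+ * the TCP or UDP checksum, depending on the L3 and L4 protocols
+ * of this frame.
+ */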
+ if (skb->ip_summed != CHECKSUM_NONE) {
+ int tcp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+ } else { /* IPv6 */
+ word1 |= TSS_IPV6_ENABLE_BIT;
+ tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+ }
+
+ word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+ }
+
+ frag = -1;
+ while (frag <= last_frag) {
+ if (frag == -1) {
+ buffer = skb->data;
+ buflen = skb_headlen(skb);
+ } else {
+ skb_frag = skb_si->frags + frag;
+ buffer = page_address(skb_frag_page(skb_frag)) +
+ skb_frag->page_offset;
+ buflen = skb_frag->size;
+ }
+
+ if (frag == last_frag) {
+ word3 |= EOF_BIT;
+ txq->skb[w] = skb;
+ }
+
+ mapping = dma_map_single(geth->dev, buffer, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping))
+ goto map_error;
+
+ txd = txq->ring + w;
+ txd->word0.bits32 = buflen;
+ txd->word1.bits32 = word1;
+ txd->word2.buf_adr = mapping;
+ txd->word3.bits32 = word3;
+
+ word3 &= MTU_SIZE_BIT_MASK;
+ w++;
+ w &= m;
+ frag++;
+ }
+
+ *desc = w;
+ return 0;
+
+map_error:
+ while (w != *desc) {
+ w--;
+ w &= m;
+
+ dma_unmap_single(geth->dev, txq->ring[w].word2.buf_adr,
+ txq->ring[w].word0.bits.buffer_size,
+ DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->txq_order) - 1;
+ struct netdev_queue *ntxq;
+ unsigned short r, w, d;
+ void __iomem *ptr_reg;
+ struct gmac_txq *txq;
+ int txq_num, nfrags;
+ union dma_rwptr rw;
+
+ SKB_FRAG_ASSERT(skb);
+
+ if (skb->len >= 0x10000)
+ goto out_drop_free;
+
+ txq_num = skb_get_queue_mapping(skb);
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
+ txq = &port->txq[txq_num];
+ ntxq = netdev_get_tx_queue(netdev, txq_num);
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
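+ /* Free descriptors: everything from the write pointer up to,
+ * but not including, the oldest descriptor not yet cleaned.
+ */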
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ gmac_clean_txq(netdev, txq, r);
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ netif_tx_stop_queue(ntxq);
+
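+ /* Have the hardware raise an end-of-frame interrupt on a
+ * descriptor a few entries ahead of the cleanup pointer, so
+ * the stopped queue gets woken once enough space frees up.
+ */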
+ d = txq->cptr + nfrags + 16;
+ d &= m;
+ txq->ring[d].word3.bits.eofie = 1;
+ gmac_tx_irq_enable(netdev, txq_num, 1);
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ netdev->stats.tx_fifo_errors++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
+ if (skb_linearize(skb))
+ goto out_drop;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frags_linearized++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w))
+ goto out_drop_free;
+ }
+
+ writew(w, ptr_reg + 2);
+
+ gmac_clean_txq(netdev, txq, r);
+ return NETDEV_TX_OK;
+
+out_drop_free:
+ dev_kfree_skb(skb);
+out_drop:
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->stats.tx_dropped++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_OK;
+}
+
+static void gmac_tx_timeout(struct net_device *netdev)
+{
+ netdev_err(netdev, "Tx timeout\n");
+ gmac_dump_dma_state(netdev);
+}
+
+static void gmac_enable_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_info(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
+ enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+ union gmac_rxdesc_0 word0,
+ unsigned int frame_len)
+{
+ unsigned int rx_csum = word0.bits.chksum_status;
+ unsigned int rx_status = word0.bits.status;
+ struct sk_buff *skb = NULL;
+
+ port->rx_stats[rx_status]++;
+ port->rx_csum_stats[rx_csum]++;
+
+ if (word0.bits.derr || word0.bits.perr ||
+ rx_status || frame_len < ETH_ZLEN ||
+ rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+ port->stats.rx_errors++;
+
+ if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+ port->stats.rx_length_errors++;
+ if (RX_ERROR_OVER(rx_status))
+ port->stats.rx_over_errors++;
+ if (RX_ERROR_CRC(rx_status))
+ port->stats.rx_crc_errors++;
+ if (RX_ERROR_FRAME(rx_status))
+ port->stats.rx_frame_errors++;
+ return NULL;
+ }
+
+ skb = napi_get_frags(&port->napi);
+ if (!skb)
+ goto update_exit;
+
+ if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+ port->stats.rx_bytes += frame_len;
+ port->stats.rx_packets++;
+ return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->rxq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg = port->rxq_rwptr;
+ unsigned int frame_len, frag_len;
+ struct gmac_rxdesc *rx = NULL;
+ struct gmac_queue_page *gpage;
+ static struct sk_buff *skb;
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
+ rw.bits32 = readl(ptr_reg);
+ /* Reset interrupt as all packets until here are taken into account */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ while (budget && w != r) {
+ rx = port->rxq_ring + r;
+ word0 = rx->word0;
+ word1 = rx->word1;
+ mapping = rx->word2.buf_adr;
+ word3 = rx->word3;
+
+ r++;
+ r &= m;
+
+ frag_len = word0.bits.buffer_size;
+ frame_len = word1.bits.byte_count;
+ page_offs = mapping & ~PAGE_MASK;
+
+ if (!mapping) {
+ netdev_err(netdev,
+ "rxq[%u]: HW BUG: zero DMA desc\n", r);
+ goto err_drop;
+ }
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find mapping\n");
+ continue;
+ }
+ page = gpage->page;
+
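+ /* Frames may span several fragments: SOF opens a new skb
+ * (dropping any half-assembled one), intermediate fragments
+ * are appended as pages, and EOF hands the completed frame
+ * to GRO.
+ */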
+ if (word3.bits32 & SOF_BIT) {
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ }
+
+ skb = gmac_skb_if_good_frame(port, word0, frame_len);
+ if (!skb)
+ goto err_drop;
+
+ page_offs += NET_IP_ALIGN;
+ frag_len -= NET_IP_ALIGN;
+ frag_nr = 0;
+
+ } else if (!skb) {
+ put_page(page);
+ continue;
+ }
+
+ if (word3.bits32 & EOF_BIT)
+ frag_len = frame_len - skb->len;
+
+ /* append page frag to skb */
+ if (frag_nr == MAX_SKB_FRAGS)
+ goto err_drop;
+
+ if (frag_len == 0)
+ netdev_err(netdev, "Received fragment with len = 0\n");
+
+ skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+ skb->len += frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ frag_nr++;
+
+ if (word3.bits32 & EOF_BIT) {
+ napi_gro_frags(&port->napi);
+ skb = NULL;
+ --budget;
+ }
+ continue;
+
+err_drop:
+ if (skb) {
+ napi_free_frags(&port->napi);
+ skb = NULL;
+ }
+
+ if (mapping)
+ put_page(page);
+
+ port->stats.rx_dropped++;
+ }
+
+ writew(r, ptr_reg);
+ return budget;
+}
+
+static int gmac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(napi->dev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int freeq_threshold;
+ unsigned int received;
+
+ freeq_threshold = 1 << (geth->freeq_order - 1);
+ u64_stats_update_begin(&port->rx_stats_syncp);
+
+ received = gmac_rx(napi->dev, budget);
+ if (received < budget) {
+ napi_gro_flush(napi, false);
+ napi_complete_done(napi, received);
+ gmac_enable_rx_irq(napi->dev, 1);
+ ++port->rx_napi_exits;
+ }
+
+ port->freeq_refill += (budget - received);
+ if (port->freeq_refill > freeq_threshold) {
+ port->freeq_refill -= freeq_threshold;
+ geth_fill_freeq(geth, true);
+ }
+
+ u64_stats_update_end(&port->rx_stats_syncp);
+ return received;
+}
+
+static void gmac_dump_dma_state(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg;
+ u32 reg[5];
+
+ /* Interrupt status */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* Interrupt enable */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* RX DMA status */
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(port->rxq_rwptr);
+ reg[3] = GET_WPTR(port->rxq_rwptr);
+ netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
+ netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* TX DMA status */
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
+ netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* FREE queues status */
+ ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;
+
+ reg[0] = GET_RPTR(ptr_reg);
+ reg[1] = GET_WPTR(ptr_reg);
+
+ ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;
+
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+}
+
+static void gmac_update_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int rx_discards, rx_mcast, rx_bcast;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+
+ rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
+ port->hw_stats[0] += rx_discards;
+ port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
+ rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
+ port->hw_stats[2] += rx_mcast;
+ rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
+ port->hw_stats[3] += rx_bcast;
+ port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
+ port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);
+
+ port->stats.rx_missed_errors += rx_discards;
+ port->stats.multicast += rx_mcast;
+ port->stats.multicast += rx_bcast;
+
+ writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+/**
+ * gmac_get_intr_flags() - get interrupt status flags for a port
+ * @netdev: the net device for the port to get flags from
+ * @i: the interrupt status register 0..4
+ */
+static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *irqif_reg, *irqen_reg;
+ unsigned int offs, val;
+
+ /* Calculate the offset using the stride of the status registers */
+ offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
+ GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
+ irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
+
+ val = readl(irqif_reg) & readl(irqen_reg);
+ return val;
+}
+
+static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
+{
+ struct gemini_ethernet_port *port =
+ container_of(timer, struct gemini_ethernet_port,
+ rx_coalesce_timer);
+
+ napi_schedule(&port->napi);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gmac_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port;
+ struct net_device *netdev = data;
+ struct gemini_ethernet *geth;
+ u32 val, orr = 0;
+
+ port = netdev_priv(netdev);
+ geth = port->geth;
+
+ val = gmac_get_intr_flags(netdev, 0);
+ orr |= val;
+
+ if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
+ /* Oh, crap */
+ netdev_err(netdev, "hw failure/sw bug\n");
+ gmac_dump_dma_state(netdev);
+
+ /* don't know how to recover, just reduce losses */
+ gmac_enable_irq(netdev, 0);
+ return IRQ_HANDLED;
+ }
+
+ if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
+ gmac_tx_irq(netdev, 0);
+
+ val = gmac_get_intr_flags(netdev, 1);
+ orr |= val;
+
+ if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
+ gmac_enable_rx_irq(netdev, 0);
+
+ if (!port->rx_coalesce_nsecs) {
+ napi_schedule(&port->napi);
+ } else {
+ ktime_t ktime;
+
+ ktime = ktime_set(0, port->rx_coalesce_nsecs);
+ hrtimer_start(&port->rx_coalesce_timer, ktime,
+ HRTIMER_MODE_REL);
+ }
+ }
+
+ val = gmac_get_intr_flags(netdev, 4);
+ orr |= val;
+
+ if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
+ gmac_update_hw_stats(netdev);
+
+ if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ spin_lock(&geth->irq_lock);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ ++port->stats.rx_fifo_errors;
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock(&geth->irq_lock);
+ }
+
+ return orr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void gmac_start_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 1;
+ dma_ctrl.bits.td_enable = 1;
+ dma_ctrl.bits.loopback = 0;
+ dma_ctrl.bits.drop_small_ack = 0;
+ dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
+ dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
+ dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.rd_bus = HSIZE_8;
+ dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
+ dma_ctrl.bits.td_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.td_bus = HSIZE_8;
+
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static void gmac_stop_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 0;
+ dma_ctrl.bits.td_enable = 0;
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static int gmac_open(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err;
+
+ if (!netdev->phydev) {
+ err = gmac_setup_phy(netdev);
+ if (err) {
+ netif_err(port, ifup, netdev,
+ "PHY init failed: %d\n", err);
+ return err;
+ }
+ }
+
+ err = request_irq(netdev->irq, gmac_irq,
+ IRQF_SHARED, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "no IRQ\n");
+ return err;
+ }
+
+ netif_carrier_off(netdev);
+ phy_start(netdev->phydev);
+
+ err = geth_resize_freeq(port);
+ if (err) {
+ netdev_err(netdev, "could not resize freeq\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_rxq(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup RXQ\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_txqs(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup TXQs\n");
+ gmac_cleanup_rxq(netdev);
+ goto err_stop_phy;
+ }
+
+ napi_enable(&port->napi);
+
+ gmac_start_dma(port);
+ gmac_enable_irq(netdev, 1);
+ gmac_enable_tx_rx(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+
+ netdev_info(netdev, "opened\n");
+
+ return 0;
+
+err_stop_phy:
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+ return err;
+}
+
+static int gmac_stop(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ hrtimer_cancel(&port->rx_coalesce_timer);
+ netif_tx_stop_all_queues(netdev);
+ gmac_disable_tx_rx(netdev);
+ gmac_stop_dma(port);
+ napi_disable(&port->napi);
+
+ gmac_enable_irq(netdev, 0);
+ gmac_cleanup_rxq(netdev);
+ gmac_cleanup_txqs(netdev);
+
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+
+ gmac_update_hw_stats(netdev);
+ return 0;
+}
+
+static void gmac_set_rx_mode(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_rx_fltr filter = { .bits = {
+ .broadcast = 1,
+ .multicast = 1,
+ .unicast = 1,
+ } };
+ struct netdev_hw_addr *ha;
+ unsigned int bit_nr;
+ u32 mc_filter[2];
+
+ mc_filter[1] = 0;
+ mc_filter[0] = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ filter.bits.error = 1;
+ filter.bits.promiscuous = 1;
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else {
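+ /* Hash each multicast address into one of 64 filter bits
+ * using the low six bits of the inverted CRC-32 of the
+ * address.
+ */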
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
+ }
+ }
+
+ writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
+ writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
+ writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
+}
+
+static void gmac_write_mac_address(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ __le32 addr[3];
+
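+ /* Zero-pad the 6-byte MAC address to 12 bytes and write it as
+ * three little-endian words into the station address registers.
+ */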
+ memset(addr, 0, sizeof(addr));
+ memcpy(addr, netdev->dev_addr, ETH_ALEN);
+
+ writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
+ writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
+ writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
+}
+
+static int gmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ gmac_write_mac_address(netdev);
+
+ return 0;
+}
+
+static void gmac_clear_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
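+ /* The hardware counters are clear-on-read: read and discard
+ * whatever has accumulated so far.
+ */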
+ readl(port->gmac_base + GMAC_IN_DISCARDS);
+ readl(port->gmac_base + GMAC_IN_ERRORS);
+ readl(port->gmac_base + GMAC_IN_MCAST);
+ readl(port->gmac_base + GMAC_IN_BCAST);
+ readl(port->gmac_base + GMAC_IN_MAC1);
+ readl(port->gmac_base + GMAC_IN_MAC2);
+}
+
+static void gmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with RX NAPI */
+ do {
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ stats->rx_packets = port->stats.rx_packets;
+ stats->rx_bytes = port->stats.rx_bytes;
+ stats->rx_errors = port->stats.rx_errors;
+ stats->rx_dropped = port->stats.rx_dropped;
+
+ stats->rx_length_errors = port->stats.rx_length_errors;
+ stats->rx_over_errors = port->stats.rx_over_errors;
+ stats->rx_crc_errors = port->stats.rx_crc_errors;
+ stats->rx_frame_errors = port->stats.rx_frame_errors;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+
+ /* Racing with MIB and TX completion interrupts */
+ do {
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ stats->tx_errors = port->stats.tx_errors;
+ stats->tx_packets = port->stats.tx_packets;
+ stats->tx_bytes = port->stats.tx_bytes;
+
+ stats->multicast = port->stats.multicast;
+ stats->rx_missed_errors = port->stats.rx_missed_errors;
+ stats->rx_fifo_errors = port->stats.rx_fifo_errors;
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+
+ /* Racing with hard_start_xmit */
+ do {
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ stats->tx_dropped = port->stats.tx_dropped;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+
+ stats->rx_dropped += stats->rx_missed_errors;
+}
+
+static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int max_len = gmac_pick_rx_max_len(new_mtu);
+
+ if (max_len < 0)
+ return -EINVAL;
+
+ gmac_disable_tx_rx(netdev);
+
+ netdev->mtu = new_mtu;
+ gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
+ CONFIG0_MAXLEN_MASK);
+
+ netdev_update_features(netdev);
+
+ gmac_enable_tx_rx(netdev);
+
+ return 0;
+}
+
+static netdev_features_t gmac_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+ features &= ~GMAC_OFFLOAD_FEATURES;
+
+ return features;
+}
+
+static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int enable = features & NETIF_F_RXCSUM;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+ return 0;
+}
+
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
+}
+
+static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
+}
+
+static void gmac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *values)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+ u64 *p;
+ int i;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with MIB interrupt */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ for (i = 0; i < RX_STATS_NUM; i++)
+ *p++ = port->hw_stats[i];
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ values = p;
+
+ /* Racing with RX NAPI */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ for (i = 0; i < RX_STATUS_NUM; i++)
+ *p++ = port->rx_stats[i];
+ for (i = 0; i < RX_CHKSUM_NUM; i++)
+ *p++ = port->rx_csum_stats[i];
+ *p++ = port->rx_napi_exits;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ values = p;
+
+ /* Racing with TX start_xmit */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ for (i = 0; i < TX_MAX_FRAGS; i++) {
+ *p++ = port->tx_frag_stats[i];
+ port->tx_frag_stats[i] = 0;
+ }
+ *p++ = port->tx_frags_linearized;
+ *p++ = port->tx_hw_csummed;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+}
+
+static int gmac_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+
+ return 0;
+}
+
+static int gmac_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+}
+
+static int gmac_nway_reset(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_start_aneg(netdev->phydev);
+}
+
+static void gmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ pparam->rx_pause = config0.bits.rx_fc_en;
+ pparam->tx_pause = config0.bits.tx_fc_en;
+ pparam->autoneg = true;
+}
+
+static void gmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ rp->rx_max_pending = 1 << 15;
+ rp->rx_mini_max_pending = 0;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = 1 << 15;
+
+ rp->rx_pending = 1 << port->rxq_order;
+ rp->rx_mini_pending = 0;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = 1 << port->txq_order;
+}
+
+static int gmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err = 0;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (rp->rx_pending) {
+ port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
+ err = geth_resize_freeq(port);
+ }
+ if (rp->tx_pending) {
+ port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+ }
+
+ return err;
+}
+
+static int gmac_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ ecmd->rx_max_coalesced_frames = 1;
+ ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
+ ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
+
+ return 0;
+}
+
+static int gmac_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ if (ecmd->tx_max_coalesced_frames < 1)
+ return -EINVAL;
+ if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
+ return -EINVAL;
+
+ port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
+ port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static u32 gmac_get_msglevel(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ return port->msg_enable;
+}
+
+static void gmac_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ port->msg_enable = level;
+}
+
+static void gmac_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
+}
+
+static const struct net_device_ops gmac_351x_ops = {
+ .ndo_init = gmac_init,
+ .ndo_uninit = gmac_uninit,
+ .ndo_open = gmac_open,
+ .ndo_stop = gmac_stop,
+ .ndo_start_xmit = gmac_start_xmit,
+ .ndo_tx_timeout = gmac_tx_timeout,
+ .ndo_set_rx_mode = gmac_set_rx_mode,
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+ .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+};
+
+static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .get_sset_count = gmac_get_sset_count,
+ .get_strings = gmac_get_strings,
+ .get_ethtool_stats = gmac_get_ethtool_stats,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = gmac_get_ksettings,
+ .set_link_ksettings = gmac_set_ksettings,
+ .nway_reset = gmac_nway_reset,
+ .get_pauseparam = gmac_get_pauseparam,
+ .get_ringparam = gmac_get_ringparam,
+ .set_ringparam = gmac_set_ringparam,
+ .get_coalesce = gmac_get_coalesce,
+ .set_coalesce = gmac_set_coalesce,
+ .get_msglevel = gmac_get_msglevel,
+ .set_msglevel = gmac_set_msglevel,
+ .get_drvinfo = gmac_get_drvinfo,
+};
+
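+/* The port IRQ is split in two: the hard handler (gemini_port_irq,
+ * below) only masks the free-queue-empty interrupt and wakes this
+ * threaded handler, which does the actual refill work and unmasks
+ * the interrupt again.
+ */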
+static irqreturn_t gemini_port_irq_thread(int irq, void *data)
+{
+ unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ unsigned long flags;
+
+ geth = port->geth;
+ /* The queue is half empty so refill it */
+ geth_fill_freeq(geth, true);
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ /* ACK queue interrupt */
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ /* Enable queue interrupt again */
+ irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gemini_port_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ irqreturn_t ret = IRQ_NONE;
+ u32 val, en;
+
+ geth = port->geth;
+ spin_lock(&geth->irq_lock);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ if (val & en & SWFQ_EMPTY_INT_BIT) {
+ /* Disable the queue empty interrupt while we work on
+ * processing the queue. Also disable overrun interrupts
+ * as there is not much we can do about it here.
+ */
+ en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
+ | GMAC1_RX_OVERRUN_INT_BIT);
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock(&geth->irq_lock);
+
+ return ret;
+}
+
+static void gemini_port_remove(struct gemini_ethernet_port *port)
+{
+ if (port->netdev)
+ unregister_netdev(port->netdev);
+ clk_disable_unprepare(port->pclk);
+ geth_cleanup_freeq(port->geth);
+}
+
+static void gemini_ethernet_init(struct gemini_ethernet *geth)
+{
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ /* Interrupt config:
+ *
+ * GMAC0 intr bits ------> int0 ----> eth0
+ * GMAC1 intr bits ------> int1 ----> eth1
+ * TOE intr -------------> int1 ----> eth1
+ * Classification Intr --> int0 ----> eth0
+ * Default Q0 -----------> int0 ----> eth0
+ * Default Q1 -----------> int1 ----> eth1
+ * FreeQ intr -----------> int1 ----> eth1
+ */
+ writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
+ writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
+ writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);
+
+ /* edge-triggered interrupts packed to level-triggered one... */
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ /* Set up queue */
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);
+
+ geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
+ /* This makes the queue resize on probe() so that we
+ * set up and enable the queue IRQ. FIXME: fragile.
+ */
+ geth->freeq_order = 1;
+}
+
+static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
+{
+ port->mac_addr[0] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
+ port->mac_addr[1] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
+ port->mac_addr[2] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
+}
+
+static int gemini_ethernet_port_probe(struct platform_device *pdev)
+{
+ char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct gemini_ethernet_port *port;
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct resource *gmacres;
+ struct resource *dmares;
+ struct device *parent;
+ unsigned int id;
+ int irq;
+ int ret;
+
+ parent = dev->parent;
+ geth = dev_get_drvdata(parent);
+
+ if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
+ id = 0;
+ else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
+ id = 1;
+ else
+ return -ENODEV;
+
+ dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+ netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ if (!netdev) {
+ dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+ return -ENOMEM;
+ }
+
+ port = netdev_priv(netdev);
+ SET_NETDEV_DEV(netdev, dev);
+ port->netdev = netdev;
+ port->id = id;
+ port->geth = geth;
+ port->dev = dev;
+
+ /* DMA memory */
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dmares) {
+ dev_err(dev, "no DMA resource\n");
+ return -ENODEV;
+ }
+ port->dma_base = devm_ioremap_resource(dev, dmares);
+ if (IS_ERR(port->dma_base))
+ return PTR_ERR(port->dma_base);
+
+ /* GMAC config memory */
+ gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!gmacres) {
+ dev_err(dev, "no GMAC resource\n");
+ return -ENODEV;
+ }
+ port->gmac_base = devm_ioremap_resource(dev, gmacres);
+ if (IS_ERR(port->gmac_base))
+ return PTR_ERR(port->gmac_base);
+
+ /* Interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no IRQ\n");
+ return irq ? irq : -ENODEV;
+ }
+ port->irq = irq;
+
+ /* Clock the port */
+ port->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(port->pclk)) {
+ dev_err(dev, "no PCLK\n");
+ return PTR_ERR(port->pclk);
+ }
+ ret = clk_prepare_enable(port->pclk);
+ if (ret)
+ return ret;
+
+ /* Maybe there is a nice ethernet address we should use */
+ gemini_port_save_mac_addr(port);
+
+ /* Reset the port */
+ port->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(port->reset)) {
+ dev_err(dev, "no reset\n");
+ return PTR_ERR(port->reset);
+ }
+ reset_control_reset(port->reset);
+ usleep_range(100, 500);
+
+ /* Assign pointer in the main state container */
+ if (!id)
+ geth->port0 = port;
+ else
+ geth->port1 = port;
+ platform_set_drvdata(pdev, port);
+
+ /* Set up and register the netdev */
+ netdev->dev_id = port->id;
+ netdev->irq = irq;
+ netdev->netdev_ops = &gmac_351x_ops;
+ netdev->ethtool_ops = &gmac_351x_ethtool_ops;
+
+ spin_lock_init(&port->config_lock);
+ gmac_clear_hw_stats(netdev);
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll,
+ DEFAULT_NAPI_WEIGHT);
+
+ if (is_valid_ether_addr((void *)port->mac_addr)) {
+ memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ } else {
+ dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
+ port->mac_addr[0], port->mac_addr[1],
+ port->mac_addr[2]);
+ dev_info(dev, "using a random ethernet address\n");
+ random_ether_addr(netdev->dev_addr);
+ }
+ gmac_write_mac_address(netdev);
+
+ ret = devm_request_threaded_irq(port->dev,
+ port->irq,
+ gemini_port_irq,
+ gemini_port_irq_thread,
+ IRQF_SHARED,
+ port_names[port->id],
+ port);
+ if (ret)
+ return ret;
+
+ ret = register_netdev(netdev);
+ if (!ret) {
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
+ netdev_info(netdev,
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
+ }
+
+ port->netdev = NULL;
+ free_netdev(netdev);
+ return ret;
+}
+
+static int gemini_ethernet_port_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+ gemini_port_remove(port);
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_port_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
+
+static struct platform_driver gemini_ethernet_port_driver = {
+ .driver = {
+ .name = "gemini-ethernet-port",
+ .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ },
+ .probe = gemini_ethernet_port_probe,
+ .remove = gemini_ethernet_port_remove,
+};
+
+static int gemini_ethernet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ unsigned int retry = 5;
+ struct resource *res;
+ u32 val;
+
+ /* Global registers */
+ geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
+ if (!geth)
+ return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ geth->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(geth->base))
+ return PTR_ERR(geth->base);
+ geth->dev = dev;
+
+ /* Wait for ports to stabilize */
+ do {
+ udelay(2);
+ val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
+ barrier();
+ } while (!val && --retry);
+ if (!retry) {
+ dev_err(dev, "failed to reset ethernet\n");
+ return -EIO;
+ }
+ dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
+ (val >> 4) & 0xFFFU, val & 0xFU);
+
+ spin_lock_init(&geth->irq_lock);
+ spin_lock_init(&geth->freeq_lock);
+ gemini_ethernet_init(geth);
+
+ /* The children will use this */
+ platform_set_drvdata(pdev, geth);
+
+ /* Spawn child devices for the two ports */
+ return devm_of_platform_populate(dev);
+}
+
+static int gemini_ethernet_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet *geth = platform_get_drvdata(pdev);
+
+ gemini_ethernet_init(geth);
+ geth_cleanup_freeq(geth);
+
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
+
+static struct platform_driver gemini_ethernet_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ },
+ .probe = gemini_ethernet_probe,
+ .remove = gemini_ethernet_remove,
+};
+
+static int __init gemini_ethernet_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gemini_ethernet_port_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&gemini_ethernet_driver);
+ if (ret) {
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gemini_ethernet_module_init);
+
+static void __exit gemini_ethernet_module_exit(void)
+{
+ platform_driver_unregister(&gemini_ethernet_driver);
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+}
+module_exit(gemini_ethernet_module_exit);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Register definitions for Gemini GMAC Ethernet device driver
+ *
+ * Copyright (C) 2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <[email protected]>
+ * Copyright (C) 2010 Michał Mirosław <[email protected]>
+ * Copyright (C) 2017 Linus Walleij <[email protected]>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE 0x2000
+#define TOE_TOE_QUE_HDR_BASE 0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID 0x00
+#define TOE_HW_FREE_QID 0x01
+#define TOE_GMAC0_SW_TXQ0_QID 0x02
+#define TOE_GMAC0_SW_TXQ1_QID 0x03
+#define TOE_GMAC0_SW_TXQ2_QID 0x04
+#define TOE_GMAC0_SW_TXQ3_QID 0x05
+#define TOE_GMAC0_SW_TXQ4_QID 0x06
+#define TOE_GMAC0_SW_TXQ5_QID 0x07
+#define TOE_GMAC0_HW_TXQ0_QID 0x08
+#define TOE_GMAC0_HW_TXQ1_QID 0x09
+#define TOE_GMAC0_HW_TXQ2_QID 0x0A
+#define TOE_GMAC0_HW_TXQ3_QID 0x0B
+#define TOE_GMAC1_SW_TXQ0_QID 0x12
+#define TOE_GMAC1_SW_TXQ1_QID 0x13
+#define TOE_GMAC1_SW_TXQ2_QID 0x14
+#define TOE_GMAC1_SW_TXQ3_QID 0x15
+#define TOE_GMAC1_SW_TXQ4_QID 0x16
+#define TOE_GMAC1_SW_TXQ5_QID 0x17
+#define TOE_GMAC1_HW_TXQ0_QID 0x18
+#define TOE_GMAC1_HW_TXQ1_QID 0x19
+#define TOE_GMAC1_HW_TXQ2_QID 0x1A
+#define TOE_GMAC1_HW_TXQ3_QID 0x1B
+#define TOE_GMAC0_DEFAULT_QID 0x20
+#define TOE_GMAC1_DEFAULT_QID 0x21
+#define TOE_CLASSIFICATION_QID(x) (0x22 + x) /* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x) (0x40 + x) /* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ * SW Free Queue, HW Free Queue,
+ * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr) readw((addr) + 2)
+#define GET_RPTR(addr) readw((addr))
+#define SET_WPTR(addr, data) writew((data), (addr) + 2)
+#define SET_RPTR(addr, data) writew((data), (addr))
+#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order) ((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
+ __RWPTR_MASK((order)))
+
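
The read/write-pointer helpers above implement a standard power-of-two ring: pointers increment freely and are masked back into range, so occupancy is a masked subtraction that survives wrap-around. A minimal user-space sketch of the arithmetic only (an illustration, not part of the patch; the readw()/writew() accessors need real MMIO and are omitted):

	#include <stdio.h>

	#define __RWPTR_NEXT(x, mask)		(((unsigned int)(x) + 1) & (mask))
	#define __RWPTR_DISTANCE(r, w, mask)	(((unsigned int)(w) - (r)) & (mask))
	#define __RWPTR_MASK(order)		((1 << (order)) - 1)

	int main(void)
	{
		unsigned int order = 6;			/* 64-entry queue (2^6) */
		unsigned int rptr = 60, wptr = 2;	/* write pointer has wrapped */

		/* (2 - 60) & 63 == 6 descriptors outstanding despite the wrap */
		printf("distance: %u\n",
		       __RWPTR_DISTANCE(rptr, wptr, __RWPTR_MASK(order)));
		/* advancing past the last slot wraps back to 0 */
		printf("next: %u\n", __RWPTR_NEXT(63, __RWPTR_MASK(order)));
		return 0;
	}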
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG 0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
+#define GLOBAL_SWFQ_RWPTR_REG 0x0014
+#define GLOBAL_HWFQ_RWPTR_REG 0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG 0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008
+#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C
+#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010
+#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014
+#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018
+#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C
+#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020
+#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024
+#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028
+#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C
+#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030
+#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038
+#define GMAC_DMA_TX_CURR_DESC_REG 0x003C
+#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040
+#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044
+#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048
+#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C
+#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050
+#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054
+#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058
+#define GMAC_DMA_RX_CURR_DESC_REG 0x005C
+#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060
+#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064
+#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068
+#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C
+#define GMAC_HASH_ENGINE_REG0 0x0070
+#define GMAC_HASH_ENGINE_REG1 0x0074
+/* matching rule 0 Control register 0 */
+#define GMAC_MR0CR0 0x0078
+#define GMAC_MR0CR1 0x007C
+#define GMAC_MR0CR2 0x0080
+#define GMAC_MR1CR0 0x0084
+#define GMAC_MR1CR1 0x0088
+#define GMAC_MR1CR2 0x008C
+#define GMAC_MR2CR0 0x0090
+#define GMAC_MR2CR1 0x0094
+#define GMAC_MR2CR2 0x0098
+#define GMAC_MR3CR0 0x009C
+#define GMAC_MR3CR1 0x00A0
+#define GMAC_MR3CR2 0x00A4
+/* Support Protocol Register 0 */
+#define GMAC_SPR0 0x00A8
+#define GMAC_SPR1 0x00AC
+#define GMAC_SPR2 0x00B0
+#define GMAC_SPR3 0x00B4
+#define GMAC_SPR4 0x00B8
+#define GMAC_SPR5 0x00BC
+#define GMAC_SPR6 0x00C0
+#define GMAC_SPR7 0x00C4
+/* GMAC Hash/Rx/Tx AHB Weighting register */
+#define GMAC_AHB_WEIGHT_REG 0x00C8
+
+/* TOE GMAC 0/1 register */
+#define GMAC_STA_ADD0 0x0000
+#define GMAC_STA_ADD1 0x0004
+#define GMAC_STA_ADD2 0x0008
+#define GMAC_RX_FLTR 0x000c
+#define GMAC_MCAST_FIL0 0x0010
+#define GMAC_MCAST_FIL1 0x0014
+#define GMAC_CONFIG0 0x0018
+#define GMAC_CONFIG1 0x001c
+#define GMAC_CONFIG2 0x0020
+#define GMAC_CONFIG3 0x0024
+#define GMAC_RESERVED 0x0028
+#define GMAC_STATUS 0x002c
+#define GMAC_IN_DISCARDS 0x0030
+#define GMAC_IN_ERRORS 0x0034
+#define GMAC_IN_MCAST 0x0038
+#define GMAC_IN_BCAST 0x003c
+#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */
+#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */
+
+#define RX_STATS_NUM 6
+
+/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
+union dma_q_base_size {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define DMA_Q_BASE_MASK (~0x0f)
+
+/* DMA SKB Buffer register (offset 0x0008) */
+union dma_skb_size {
+ unsigned int bits32;
+ struct bit_0008 {
+		unsigned int sw_skb_size : 16;	/* SW Free pool SKB Size */
+		unsigned int hw_skb_size : 16;	/* HW Free pool SKB Size */
+ } bits;
+};
+
+/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
+union dma_rwptr {
+ unsigned int bits32;
+ struct bit_000c {
+ unsigned int rptr : 16; /* Read Ptr, RO */
+ unsigned int wptr : 16; /* Write Ptr, RW */
+ } bits;
+};
+
+/* Interrupt Status Register 0 (offset 0x0020)
+ * Interrupt Mask Register 0 (offset 0x0024)
+ * Interrupt Select Register 0 (offset 0x0028)
+ */
+#define GMAC1_TXDERR_INT_BIT BIT(31)
+#define GMAC1_TXPERR_INT_BIT BIT(30)
+#define GMAC0_TXDERR_INT_BIT BIT(29)
+#define GMAC0_TXPERR_INT_BIT BIT(28)
+#define GMAC1_RXDERR_INT_BIT BIT(27)
+#define GMAC1_RXPERR_INT_BIT BIT(26)
+#define GMAC0_RXDERR_INT_BIT BIT(25)
+#define GMAC0_RXPERR_INT_BIT BIT(24)
+#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23)
+#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22)
+#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21)
+#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20)
+#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19)
+#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18)
+#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17)
+#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16)
+#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15)
+#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14)
+#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13)
+#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12)
+#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11)
+#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10)
+#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9)
+#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8)
+#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7)
+#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6)
+#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5)
+#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4)
+#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3)
+#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2)
+#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1)
+#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0)
+
+/* Interrupt Status Register 1 (offset 0x0030)
+ * Interrupt Mask Register 1 (offset 0x0034)
+ * Interrupt Select Register 1 (offset 0x0038)
+ */
+#define TOE_IQ3_FULL_INT_BIT BIT(31)
+#define TOE_IQ2_FULL_INT_BIT BIT(30)
+#define TOE_IQ1_FULL_INT_BIT BIT(29)
+#define TOE_IQ0_FULL_INT_BIT BIT(28)
+#define TOE_IQ3_INT_BIT BIT(27)
+#define TOE_IQ2_INT_BIT BIT(26)
+#define TOE_IQ1_INT_BIT BIT(25)
+#define TOE_IQ0_INT_BIT BIT(24)
+#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23)
+#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22)
+#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21)
+#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20)
+#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19)
+#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18)
+#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17)
+#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16)
+#define CLASS_RX_INT_BIT(x) BIT((x + 2))
+#define DEFAULT_Q1_INT_BIT BIT(1)
+#define DEFAULT_Q0_INT_BIT BIT(0)
+
+#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
+ TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
+#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
+ TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
+#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
+#define TOE_CLASS_RX_INT_BITS 0xfffc
+
+/* Interrupt Status Register 2 (offset 0x0040)
+ * Interrupt Mask Register 2 (offset 0x0044)
+ * Interrupt Select Register 2 (offset 0x0048)
+ */
+#define TOE_QL_FULL_INT_BIT(x) BIT(x)
+
+/* Interrupt Status Register 3 (offset 0x0050)
+ * Interrupt Mask Register 3 (offset 0x0054)
+ * Interrupt Select Register 3 (offset 0x0058)
+ */
+#define TOE_QH_FULL_INT_BIT(x) BIT(x - 32)
+
+/* Interrupt Status Register 4 (offset 0x0060)
+ * Interrupt Mask Register 4 (offset 0x0064)
+ * Interrupt Select Register 4 (offset 0x0068)
+ */
+#define GMAC1_RESERVED_INT_BIT BIT(31)
+#define GMAC1_MIB_INT_BIT BIT(30)
+#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29)
+#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28)
+#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27)
+#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26)
+#define GMAC1_RX_OVERRUN_INT_BIT BIT(25)
+#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24)
+#define GMAC0_RESERVED_INT_BIT BIT(23)
+#define GMAC0_MIB_INT_BIT BIT(22)
+#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21)
+#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20)
+#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19)
+#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18)
+#define GMAC0_RX_OVERRUN_INT_BIT BIT(17)
+#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16)
+#define CLASS_RX_FULL_INT_BIT(x) BIT(x + 2)
+#define HWFQ_EMPTY_INT_BIT BIT(1)
+#define SWFQ_EMPTY_INT_BIT BIT(0)
+
+#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
+ GMAC0_RX_PAUSE_ON_INT_BIT | \
+ GMAC0_TX_PAUSE_ON_INT_BIT | \
+ GMAC0_RX_PAUSE_OFF_INT_BIT | \
+ GMAC0_TX_PAUSE_OFF_INT_BIT | \
+ GMAC0_RX_OVERRUN_INT_BIT | \
+ GMAC0_STATUS_CHANGE_INT_BIT)
+#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
+ GMAC1_RX_PAUSE_ON_INT_BIT | \
+ GMAC1_TX_PAUSE_ON_INT_BIT | \
+ GMAC1_RX_PAUSE_OFF_INT_BIT | \
+ GMAC1_TX_PAUSE_OFF_INT_BIT | \
+ GMAC1_RX_OVERRUN_INT_BIT | \
+ GMAC1_STATUS_CHANGE_INT_BIT)
+
+#define CLASS_RX_FULL_INT_BITS 0xfffc
+
+/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */
+union queue_threshold {
+ unsigned int bits32;
+ struct bit_0070_2 {
+ /* 7:0 Software Free Queue Empty Threshold */
+ unsigned int swfq_empty:8;
+ /* 15:8 Hardware Free Queue Empty Threshold */
+ unsigned int hwfq_empty:8;
+ /* 23:16 */
+ unsigned int intrq:8;
+ /* 31:24 */
+ unsigned int toe_class:8;
+ } bits;
+};
+
+/* GMAC DMA Control Register
+ * GMAC0 offset 0x8000
+ * GMAC1 offset 0xC000
+ */
+union gmac_dma_ctrl {
+ unsigned int bits32;
+ struct bit_8000 {
+ /* bit 1:0 Peripheral Bus Width */
+ unsigned int td_bus:2;
+ /* bit 3:2 TxDMA max burst size for every AHB request */
+ unsigned int td_burst_size:2;
+ /* bit 7:4 TxDMA protection control */
+ unsigned int td_prot:4;
+ /* bit 9:8 Peripheral Bus Width */
+ unsigned int rd_bus:2;
+ /* bit 11:10 DMA max burst size for every AHB request */
+ unsigned int rd_burst_size:2;
+ /* bit 15:12 DMA Protection Control */
+ unsigned int rd_prot:4;
+ /* bit 17:16 */
+ unsigned int rd_insert_bytes:2;
+ /* bit 27:18 */
+ unsigned int reserved:10;
+ /* bit 28 1: Drop, 0: Accept */
+ unsigned int drop_small_ack:1;
+ /* bit 29 Loopback TxDMA to RxDMA */
+ unsigned int loopback:1;
+ /* bit 30 Tx DMA Enable */
+ unsigned int td_enable:1;
+ /* bit 31 Rx DMA Enable */
+ unsigned int rd_enable:1;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 0
+ * GMAC0 offset 0x8004
+ * GMAC1 offset 0xC004
+ */
+union gmac_tx_wcr0 {
+ unsigned int bits32;
+ struct bit_8004 {
+		/* bit 5:0 HW TX Queue 0 */
+		unsigned int hw_tq0:6;
+		/* bit 11:6 HW TX Queue 1 */
+		unsigned int hw_tq1:6;
+		/* bit 17:12 HW TX Queue 2 */
+		unsigned int hw_tq2:6;
+		/* bit 23:18 HW TX Queue 3 */
+		unsigned int hw_tq3:6;
+ /* bit 31:24 */
+ unsigned int reserved:8;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 1
+ * GMAC0 offset 0x8008
+ * GMAC1 offset 0xC008
+ */
+union gmac_tx_wcr1 {
+ unsigned int bits32;
+ struct bit_8008 {
+ /* bit 4:0 SW TX Queue 0 */
+ unsigned int sw_tq0:5;
+ /* bit 9:5 SW TX Queue 1 */
+ unsigned int sw_tq1:5;
+ /* bit 14:10 SW TX Queue 2 */
+ unsigned int sw_tq2:5;
+ /* bit 19:15 SW TX Queue 3 */
+ unsigned int sw_tq3:5;
+ /* bit 24:20 SW TX Queue 4 */
+ unsigned int sw_tq4:5;
+ /* bit 29:25 SW TX Queue 5 */
+ unsigned int sw_tq5:5;
+ /* bit 31:30 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 0 Register
+ * GMAC0 offset 0x8040
+ * GMAC1 offset 0xC040
+ */
+union gmac_txdesc_0 {
+ unsigned int bits32;
+ struct bit_8040 {
+ /* bit 15:0 Transfer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+ /* bit 22 Tx Status, 1: Successful 0: Failed */
+ unsigned int status_tx_ok:1;
+ /* bit 28:23 Tx Status, Reserved bits */
+ unsigned int status_rvd:6;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 */
+ unsigned int reserved:1;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 1 Register
+ * GMAC0 offset 0x8044
+ * GMAC1 offset 0xC044
+ */
+union gmac_txdesc_1 {
+ unsigned int bits32;
+ struct txdesc_word1 {
+ /* bit 15: 0 Tx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 16 TSS segmentation use MTU setting */
+ unsigned int mtu_enable:1;
+ /* bit 17 IPV4 Header Checksum Enable */
+ unsigned int ip_chksum:1;
+ /* bit 18 IPV6 Tx Enable */
+ unsigned int ipv6_enable:1;
+ /* bit 19 TCP Checksum Enable */
+ unsigned int tcp_chksum:1;
+ /* bit 20 UDP Checksum Enable */
+ unsigned int udp_chksum:1;
+ /* bit 21 Bypass HW offload engine */
+ unsigned int bypass_tss:1;
+ /* bit 22 Don't update IP length field */
+ unsigned int ip_fixed_len:1;
+ /* bit 31:23 Tx Flag, Reserved */
+ unsigned int reserved:9;
+ } bits;
+};
+
+#define TSS_IP_FIXED_LEN_BIT BIT(22)
+#define TSS_BYPASS_BIT BIT(21)
+#define TSS_UDP_CHKSUM_BIT BIT(20)
+#define TSS_TCP_CHKSUM_BIT BIT(19)
+#define TSS_IPV6_ENABLE_BIT BIT(18)
+#define TSS_IP_CHKSUM_BIT BIT(17)
+#define TSS_MTU_ENABLE_BIT BIT(16)
+
+#define TSS_CHECKUM_ENABLE \
+ (TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
+ TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
+
+/* GMAC DMA Tx Description Word 2 Register
+ * GMAC0 offset 0x8048
+ * GMAC1 offset 0xC048
+ */
+union gmac_txdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+/* GMAC DMA Tx Description Word 3 Register
+ * GMAC0 offset 0x804C
+ * GMAC1 offset 0xC04C
+ */
+union gmac_txdesc_3 {
+ unsigned int bits32;
+ struct txdesc_word3 {
+		/* bit 12: 0 MTU size */
+ unsigned int mtu_size:13;
+ /* bit 28:13 */
+ unsigned int reserved:16;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+#define SOF_EOF_BIT_MASK 0x3fffffff
+#define SOF_BIT 0x80000000
+#define EOF_BIT 0x40000000
+#define EOFIE_BIT BIT(29)
+#define MTU_SIZE_BIT_MASK 0x1fff
+
+/* GMAC Tx Descriptor */
+struct gmac_txdesc {
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ union gmac_txdesc_2 word2;
+ union gmac_txdesc_3 word3;
+};
+
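To make the sof_eof encoding concrete (11: only one, 10: first, 01: last, 00: linking), a hypothetical helper — the function name is an assumption, not part of the patch — could mark a frame spanning ndesc descriptors like this:

	static void gmac_mark_frame(struct gmac_txdesc *desc, int ndesc)
	{
		int i;

		for (i = 0; i < ndesc; i++) {
			/* clear bits 31:30, then set the frame-position code */
			unsigned int w3 = desc[i].word3.bits32 & SOF_EOF_BIT_MASK;

			if (ndesc == 1)
				w3 |= SOF_BIT | EOF_BIT;	/* 11: only one */
			else if (i == 0)
				w3 |= SOF_BIT;			/* 10: first */
			else if (i == ndesc - 1)
				w3 |= EOF_BIT;			/* 01: last */
			/* otherwise 00: linking descriptor */

			desc[i].word3.bits32 = w3;
		}
	}
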
+/* GMAC DMA Rx Description Word 0 Register
+ * GMAC0 offset 0x8060
+ * GMAC1 offset 0xC060
+ */
+union gmac_rxdesc_0 {
+ unsigned int bits32;
+ struct bit_8060 {
+		/* bit 15:0 Receive buffer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+		/* bit 25:22 Status of rx frame */
+ unsigned int status:4;
+ /* bit 28:26 Check Sum Status */
+ unsigned int chksum_status:3;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 TOE/CIS Queue Full dropped packet to default queue */
+ unsigned int drop:1;
+ } bits;
+};
+
+#define GMAC_RXDESC_0_T_derr BIT(30)
+#define GMAC_RXDESC_0_T_perr BIT(29)
+#define GMAC_RXDESC_0_T_chksum_status(x) BIT(x + 26)
+#define GMAC_RXDESC_0_T_status(x) BIT(x + 22)
+#define GMAC_RXDESC_0_T_desc_count(x) BIT(x + 16)
+
+#define RX_CHKSUM_IP_UDP_TCP_OK 0
+#define RX_CHKSUM_IP_OK_ONLY 1
+#define RX_CHKSUM_NONE 2
+#define RX_CHKSUM_IP_ERR_UNKNOWN 4
+#define RX_CHKSUM_IP_ERR 5
+#define RX_CHKSUM_TCP_UDP_ERR 6
+#define RX_CHKSUM_NUM 8
+
+#define RX_STATUS_GOOD_FRAME 0
+#define RX_STATUS_TOO_LONG_GOOD_CRC 1
+#define RX_STATUS_RUNT_FRAME 2
+#define RX_STATUS_SFD_NOT_FOUND 3
+#define RX_STATUS_CRC_ERROR 4
+#define RX_STATUS_TOO_LONG_BAD_CRC 5
+#define RX_STATUS_ALIGNMENT_ERROR 6
+#define RX_STATUS_TOO_LONG_BAD_ALIGN 7
+#define RX_STATUS_RX_ERR 8
+#define RX_STATUS_DA_FILTERED 9
+#define RX_STATUS_BUFFER_FULL 10
+#define RX_STATUS_NUM 16
+
+#define RX_ERROR_LENGTH(s) \
+ ((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_OVER(s) \
+ ((s) == RX_STATUS_BUFFER_FULL)
+#define RX_ERROR_CRC(s) \
+ ((s) == RX_STATUS_CRC_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC)
+#define RX_ERROR_FRAME(s) \
+ ((s) == RX_STATUS_ALIGNMENT_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_FIFO(s) \
+ (0)
+
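The classification macros above line up with the standard net_device error counters; a hedged sketch of how a receive path might fold a descriptor status into statistics (helper name assumed, not from the patch):

	static void gmac_count_rx_error(struct net_device *netdev,
					unsigned int status)
	{
		netdev->stats.rx_errors++;
		if (RX_ERROR_LENGTH(status))
			netdev->stats.rx_length_errors++;
		if (RX_ERROR_OVER(status))
			netdev->stats.rx_over_errors++;
		if (RX_ERROR_CRC(status))
			netdev->stats.rx_crc_errors++;
		if (RX_ERROR_FRAME(status))
			netdev->stats.rx_frame_errors++;
	}
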
+/* GMAC DMA Rx Description Word 1 Register
+ * GMAC0 offset 0x8064
+ * GMAC1 offset 0xC064
+ */
+union gmac_rxdesc_1 {
+ unsigned int bits32;
+ struct rxdesc_word1 {
+ /* bit 15: 0 Rx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 31:16 Software ID */
+ unsigned int sw_id:16;
+ } bits;
+};
+
+/* GMAC DMA Rx Description Word 2 Register
+ * GMAC0 offset 0x8068
+ * GMAC1 offset 0xC068
+ */
+union gmac_rxdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+#define RX_INSERT_NONE 0
+#define RX_INSERT_1_BYTE 1
+#define RX_INSERT_2_BYTE 2
+#define RX_INSERT_3_BYTE 3
+
+/* GMAC DMA Rx Description Word 3 Register
+ * GMAC0 offset 0x806C
+ * GMAC1 offset 0xC06C
+ */
+union gmac_rxdesc_3 {
+ unsigned int bits32;
+ struct rxdesc_word3 {
+ /* bit 7: 0 L3 data offset */
+ unsigned int l3_offset:8;
+ /* bit 15: 8 L4 data offset */
+ unsigned int l4_offset:8;
+ /* bit 23: 16 L7 data offset */
+ unsigned int l7_offset:8;
+ /* bit 24 Duplicated ACK detected */
+ unsigned int dup_ack:1;
+ /* bit 25 abnormal case found */
+ unsigned int abnormal:1;
+ /* bit 26 IPV4 option or IPV6 extension header */
+ unsigned int option:1;
+ /* bit 27 Out of Sequence packet */
+ unsigned int out_of_seq:1;
+ /* bit 28 Control Flag is present */
+ unsigned int ctrl_flag:1;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+/* GMAC Rx Descriptor, this is simply fitted over the queue registers */
+struct gmac_rxdesc {
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_2 word2;
+ union gmac_rxdesc_3 word3;
+};
+
+/* GMAC Matching Rule Control Register 0
+ * GMAC0 offset 0x8078
+ * GMAC1 offset 0xC078
+ */
+#define MR_L2_BIT BIT(31)
+#define MR_L3_BIT BIT(30)
+#define MR_L4_BIT BIT(29)
+#define MR_L7_BIT BIT(28)
+#define MR_PORT_BIT BIT(27)
+#define MR_PRIORITY_BIT BIT(26)
+#define MR_DA_BIT BIT(23)
+#define MR_SA_BIT BIT(22)
+#define MR_ETHER_TYPE_BIT BIT(21)
+#define MR_VLAN_BIT BIT(20)
+#define MR_PPPOE_BIT BIT(19)
+#define MR_IP_VER_BIT BIT(15)
+#define MR_IP_HDR_LEN_BIT BIT(14)
+#define MR_FLOW_LABLE_BIT BIT(13)
+#define MR_TOS_TRAFFIC_BIT BIT(12)
+#define MR_SPR_BIT(x) BIT(x)
+#define MR_SPR_BITS 0xff
+
+/* GMAC_AHB_WEIGHT registers
+ * GMAC0 offset 0x80C8
+ * GMAC1 offset 0xC0C8
+ */
+union gmac_ahb_weight {
+ unsigned int bits32;
+ struct bit_80C8 {
+ /* 4:0 */
+ unsigned int hash_weight:5;
+ /* 9:5 */
+ unsigned int rx_weight:5;
+ /* 14:10 */
+ unsigned int tx_weight:5;
+ /* 19:15 Rx Data Pre Request FIFO Threshold */
+ unsigned int pre_req:5;
+ /* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
+ unsigned int tq_dv_threshold:5;
+ /* 31:25 */
+ unsigned int reserved:7;
+ } bits;
+};
+
+/* GMAC RX FLTR
+ * GMAC0 Offset 0xA00C
+ * GMAC1 Offset 0xE00C
+ */
+union gmac_rx_fltr {
+ unsigned int bits32;
+ struct bit1_000c {
+ /* Enable receive of unicast frames that are sent to STA
+ * address
+ */
+ unsigned int unicast:1;
+ /* Enable receive of multicast frames that pass multicast
+ * filter
+ */
+ unsigned int multicast:1;
+ /* Enable receive of broadcast frames */
+ unsigned int broadcast:1;
+ /* Enable receive of all frames */
+ unsigned int promiscuous:1;
+ /* Enable receive of all error frames */
+ unsigned int error:1;
+ unsigned int reserved:27;
+ } bits;
+};
+
+/* GMAC Configuration 0
+ * GMAC0 Offset 0xA018
+ * GMAC1 Offset 0xE018
+ */
+union gmac_config0 {
+ unsigned int bits32;
+ struct bit1_0018 {
+ /* 0: disable transmit */
+ unsigned int dis_tx:1;
+ /* 1: disable receive */
+ unsigned int dis_rx:1;
+ /* 2: transmit data loopback enable */
+ unsigned int loop_back:1;
+		/* 3: flow control also triggered by Rx queues */
+ unsigned int flow_ctrl:1;
+ /* 4-7: adjust IFG from 96+/-56 */
+ unsigned int adj_ifg:4;
+ /* 8-10 maximum receive frame length allowed */
+ unsigned int max_len:3;
+ /* 11: disable back-off function */
+ unsigned int dis_bkoff:1;
+ /* 12: disable 16 collisions abort function */
+ unsigned int dis_col:1;
+ /* 13: speed up timers in simulation */
+ unsigned int sim_test:1;
+ /* 14: RX flow control enable */
+ unsigned int rx_fc_en:1;
+ /* 15: TX flow control enable */
+ unsigned int tx_fc_en:1;
+ /* 16: RGMII in-band status enable */
+ unsigned int rgmii_en:1;
+ /* 17: IPv4 RX Checksum enable */
+ unsigned int ipv4_rx_chksum:1;
+ /* 18: IPv6 RX Checksum enable */
+ unsigned int ipv6_rx_chksum:1;
+ /* 19: Remove Rx VLAN tag */
+ unsigned int rx_tag_remove:1;
+ /* 20 */
+ unsigned int rgmm_edge:1;
+ /* 21 */
+ unsigned int rxc_inv:1;
+ /* 22 */
+ unsigned int ipv6_exthdr_order:1;
+ /* 23 */
+ unsigned int rx_err_detect:1;
+ /* 24 */
+ unsigned int port0_chk_hwq:1;
+ /* 25 */
+ unsigned int port1_chk_hwq:1;
+ /* 26 */
+ unsigned int port0_chk_toeq:1;
+ /* 27 */
+ unsigned int port1_chk_toeq:1;
+ /* 28 */
+ unsigned int port0_chk_classq:1;
+ /* 29 */
+ unsigned int port1_chk_classq:1;
+ /* 30, 31 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+#define CONFIG0_TX_RX_DISABLE (BIT(1) | BIT(0))
+#define CONFIG0_RX_CHKSUM (BIT(18) | BIT(17))
+#define CONFIG0_FLOW_RX BIT(14)
+#define CONFIG0_FLOW_TX BIT(15)
+#define CONFIG0_FLOW_TX_RX (BIT(14) | BIT(15))
+#define CONFIG0_FLOW_CTL (BIT(14) | BIT(15))
+
+#define CONFIG0_MAXLEN_SHIFT 8
+#define CONFIG0_MAXLEN_MASK (7 << CONFIG0_MAXLEN_SHIFT)
+#define CONFIG0_MAXLEN_1536 0
+#define CONFIG0_MAXLEN_1518 1
+#define CONFIG0_MAXLEN_1522 2
+#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_9k 4 /* 9212 */
+#define CONFIG0_MAXLEN_10k 5 /* 10236 */
+#define CONFIG0_MAXLEN_1518__6 6
+#define CONFIG0_MAXLEN_1518__7 7
+
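Because the max-length field holds an encoding rather than a byte count, deriving it from a frame length is a threshold walk; a hypothetical helper under that assumption (not part of the patch), whose result would be placed with (enc << CONFIG0_MAXLEN_SHIFT) & CONFIG0_MAXLEN_MASK:

	static unsigned int gmac_maxlen_encoding(unsigned int frame_len)
	{
		if (frame_len <= 1518)
			return CONFIG0_MAXLEN_1518;
		if (frame_len <= 1522)
			return CONFIG0_MAXLEN_1522;
		if (frame_len <= 1536)
			return CONFIG0_MAXLEN_1536;
		if (frame_len <= 1542)
			return CONFIG0_MAXLEN_1542;
		if (frame_len <= 9212)
			return CONFIG0_MAXLEN_9k;
		return CONFIG0_MAXLEN_10k;	/* 10236 bytes max */
	}
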
+/* GMAC Configuration 1
+ * GMAC0 Offset 0xA01C
+ * GMAC1 Offset 0xE01C
+ */
+union gmac_config1 {
+ unsigned int bits32;
+ struct bit1_001c {
+ /* Flow control set threshold */
+ unsigned int set_threshold:8;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:8;
+ unsigned int reserved:16;
+ } bits;
+};
+
+#define GMAC_FLOWCTRL_SET_MAX 32
+#define GMAC_FLOWCTRL_SET_MIN 0
+#define GMAC_FLOWCTRL_RELEASE_MAX 32
+#define GMAC_FLOWCTRL_RELEASE_MIN 0
+
+/* GMAC Configuration 2
+ * GMAC0 Offset 0xA020
+ * GMAC1 Offset 0xE020
+ */
+union gmac_config2 {
+ unsigned int bits32;
+ struct bit1_0020 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC Configuration 3
+ * GMAC0 Offset 0xA024
+ * GMAC1 Offset 0xE024
+ */
+union gmac_config3 {
+ unsigned int bits32;
+ struct bit1_0024 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC STATUS
+ * GMAC0 Offset 0xA02C
+ * GMAC1 Offset 0xE02C
+ */
+union gmac_status {
+ unsigned int bits32;
+ struct bit1_002c {
+ /* Link status */
+ unsigned int link:1;
+		/* Link speed (00->2.5M 01->25M 10->125M) */
+ unsigned int speed:2;
+ /* Duplex mode */
+ unsigned int duplex:1;
+ unsigned int reserved_1:1;
+ /* PHY interface type */
+ unsigned int mii_rmii:2;
+ unsigned int reserved_2:25;
+ } bits;
+};
+
+#define GMAC_SPEED_10 0
+#define GMAC_SPEED_100 1
+#define GMAC_SPEED_1000 2
+
+#define GMAC_PHY_MII 0
+#define GMAC_PHY_GMII 1
+#define GMAC_PHY_RGMII_100_10 2
+#define GMAC_PHY_RGMII_1000 3
+
+/* Queue Header
+ * (1) TOE Queue Header
+ * (2) Non-TOE Queue Header
+ * (3) Interrupt Queue Header
+ *
+ * memory Layout
+ * TOE Queue Header
+ * 0x60003000 +---------------------------+ 0x0000
+ * | TOE Queue 0 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0020
+ * | TOE Queue 1 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0040
+ * | ...... |
+ * | |
+ * +---------------------------+
+ *
+ * Non TOE Queue Header
+ * 0x60002000 +---------------------------+ 0x0000
+ * | Default Queue 0 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0008
+ * | Default Queue 1 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0010
+ * | Classification Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Classification Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (n * 8 + 0x10)
+ * | ... |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (13 * 8 + 0x10)
+ * | Classification Queue 13 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x80
+ * | Interrupt Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 2 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 3 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ *
+ */
+#define TOE_QUEUE_HDR_ADDR(n) (TOE_TOE_QUE_HDR_BASE + n * 32)
+#define TOE_Q_HDR_AREA_END (TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
+#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
+#define TOE_CLASS_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x10)
+#define TOE_INTR_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x80)
+#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + n * 8)
+#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
+
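As a quick sanity check against the layout diagram above: TOE_QUEUE_HDR_ADDR(2) = 0x3000 + 2 * 32 = 0x3040, matching the 0x0020-per-queue stride of the TOE headers, and INTERRUPT_QUEUE_HDR_ADDR(1) = 0x2000 + 0x80 + 8 = 0x2088, landing in the interrupt-queue block that starts at offset 0x80 of the non-TOE area.
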
+/* NONTOE Queue Header Word 0 */
+union nontoe_qhdr0 {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define NONTOE_QHDR0_BASE_MASK (~0x0f)
+
+/* NONTOE Queue Header Word 1 */
+union nontoe_qhdr1 {
+ unsigned int bits32;
+ struct bit_nonqhdr1 {
+ /* bit 15:0 */
+ unsigned int rptr:16;
+ /* bit 31:16 */
+ unsigned int wptr:16;
+ } bits;
+};
+
+/* Non-TOE Queue Header */
+struct nontoe_qhdr {
+ union nontoe_qhdr0 word0;
+ union nontoe_qhdr1 word1;
+};
+
+#endif /* _GEMINI_ETHERNET_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+ timeout_work);
+ struct net_device *dev = fep->ndev;
unsigned long flags;
int wake = 0;
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(dev->phydev);
}
phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ schedule_work(&fep->timeout_work);
+}
+
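(The deferral is the point of this change: the tx-timeout hook is invoked from the netdev watchdog in atomic context, while phy_stop() and phy_start() may sleep, so the recovery now runs from a work item in process context; fs_enet_close() below cancels the work to avoid racing teardown.)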
/*-----------------------------------------------------------------------------
* generic link-change handler - should be sufficient for most cases
*-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
+ cancel_work_sync(&fep->timeout_work);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
+ INIT_WORK(&fep->timeout_work, fs_timeout_work);
netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi;
+ struct work_struct timeout_work;
const struct fs_ops *ops;
int rx_ring, tx_ring;
dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
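(Calling set_fipers() while still holding etsects->lock makes the FIPER reload atomic with the counter write, closing the window in which a concurrent access could observe the adjusted counter alongside stale FIPER values.)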
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index adec88d941df..634e9327968b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
+ u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -448,6 +453,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
};
struct hnae3_roce_private_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b23107d7821f..ac848163ccae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -158,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
{
- /* this defines the configuration for GL (Interrupt Gap Limiter)
- * GL defines inter interrupt gap.
- * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
- */
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 rl_value)
-{
 	/* This defines the configuration for RL (Interrupt Rate Limiter).
 	 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 	 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
*/
- writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+ !tqp_vector->rx_group.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+}
+
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
- /* Default :enable interrupt coalesce */
- tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+ /* Default: enable interrupt coalescing self-adaptive and GL */
+ tqp_vector->tx_group.gl_adapt_enable = 1;
+ tqp_vector->rx_group.gl_adapt_enable = 1;
+
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
- hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
- /* for now we are disabling Interrupt RL - we
- * will re-enable later
- */
- hns3_set_vector_coalesc_rl(tqp_vector, 0);
+ tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.int_gl);
+
+ /* Default: disable RL */
+ h->kinfo.int_rl_setting = 0;
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -1093,26 +1118,31 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- netdev_features_t changed;
int ret;
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = hns3_fill_desc_tso;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+ } else {
+ priv->ops.fill_desc = hns3_fill_desc;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ }
}
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- h->ae_algo->ops->enable_vlan_filter(h, true);
- else
- h->ae_algo->ops->enable_vlan_filter(h, false);
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ }
- changed = netdev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
else
@@ -1126,8 +1156,8 @@ static int hns3_nic_set_features(struct net_device *netdev,
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
@@ -2422,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
- rx_int_gl = tqp_vector->rx_group.int_gl;
- tx_int_gl = tqp_vector->tx_group.int_gl;
- if (rx && tx) {
- if (rx_int_gl > tx_int_gl) {
- tqp_vector->tx_group.int_gl = rx_int_gl;
- tqp_vector->tx_group.flow_level =
- tqp_vector->rx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
- } else {
- tqp_vector->rx_group.int_gl = tx_int_gl;
- tqp_vector->rx_group.flow_level =
- tqp_vector->tx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
- }
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ if (rx_group->gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->int_gl);
+ }
+
+ if (tx_group->gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->int_gl);
}
}
@@ -2501,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2516,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2527,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2540,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2628,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
- hns3_vector_gl_rl_init(tqp_vector);
+ hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a2a7ea3e9a3a..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
-#define HNS3_INT_GL_50K 0x000A
-#define HNS3_INT_GL_20K 0x0019
-#define HNS3_INT_GL_18K 0x001B
-#define HNS3_INT_GL_8K 0x003E
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
+ u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,6 +599,12 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
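The conversion macros encode the hardware units: GL registers count in 2 µs steps and RL registers in 4 µs steps, hence the round-down helpers. As a worked example (not from the patch): a requested GL of 37 µs rounds down to 36 µs and is written to the register as 36 >> 1 = 18, while an RL of 50 µs rounds down to 48 µs and is written as 48 >> 2 = 12.
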
void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
@@ -606,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 1e8fac3ae750..358f78036941 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -59,41 +59,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-/* netdev stats */
-#define HNS3_NETDEV_STAT(_string, _member) { \
- .stats_string = _string, \
- .stats_offset = offsetof(struct rtnl_link_stats64, _member) \
-}
-
-static const struct hns3_stats hns3_netdev_stats[] = {
- /* Rx per-queue statistics */
- HNS3_NETDEV_STAT("rx_packets", rx_packets),
- HNS3_NETDEV_STAT("tx_packets", tx_packets),
- HNS3_NETDEV_STAT("rx_bytes", rx_bytes),
- HNS3_NETDEV_STAT("tx_bytes", tx_bytes),
- HNS3_NETDEV_STAT("rx_errors", rx_errors),
- HNS3_NETDEV_STAT("tx_errors", tx_errors),
- HNS3_NETDEV_STAT("rx_dropped", rx_dropped),
- HNS3_NETDEV_STAT("tx_dropped", tx_dropped),
- HNS3_NETDEV_STAT("multicast", multicast),
- HNS3_NETDEV_STAT("collisions", collisions),
- HNS3_NETDEV_STAT("rx_length_errors", rx_length_errors),
- HNS3_NETDEV_STAT("rx_over_errors", rx_over_errors),
- HNS3_NETDEV_STAT("rx_crc_errors", rx_crc_errors),
- HNS3_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
- HNS3_NETDEV_STAT("rx_fifo_errors", rx_fifo_errors),
- HNS3_NETDEV_STAT("rx_missed_errors", rx_missed_errors),
- HNS3_NETDEV_STAT("tx_aborted_errors", tx_aborted_errors),
- HNS3_NETDEV_STAT("tx_carrier_errors", tx_carrier_errors),
- HNS3_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
- HNS3_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
- HNS3_NETDEV_STAT("tx_window_errors", tx_window_errors),
- HNS3_NETDEV_STAT("rx_compressed", rx_compressed),
- HNS3_NETDEV_STAT("tx_compressed", tx_compressed),
-};
-
-#define HNS3_NETDEV_STATS_COUNT ARRAY_SIZE(hns3_netdev_stats)
-
#define HNS3_SELF_TEST_TPYE_NUM 1
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
@@ -466,27 +431,6 @@ static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
return data;
}
-static u8 *hns3_netdev_stats_get_strings(u8 *data)
-{
- int i;
-
- /* get strings for netdev */
- for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- hns3_netdev_stats[i].stats_string);
- data += ETH_GSTRING_LEN;
- }
-
- snprintf(data, ETH_GSTRING_LEN, "netdev_rx_dropped");
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "netdev_tx_dropped");
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "netdev_tx_timeout");
- data += ETH_GSTRING_LEN;
-
- return data;
-}
-
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -498,7 +442,6 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- buff = hns3_netdev_stats_get_strings(buff);
buff = hns3_get_strings_tqps(h, buff);
h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
break;
@@ -537,27 +480,6 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
return data;
}
-static u64 *hns3_get_netdev_stats(struct net_device *netdev, u64 *data)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- const struct rtnl_link_stats64 *net_stats;
- struct rtnl_link_stats64 temp;
- u8 *stat;
- int i;
-
- net_stats = dev_get_stats(netdev, &temp);
- for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
- stat = (u8 *)net_stats + hns3_netdev_stats[i].stats_offset;
- *data++ = *(u64 *)stat;
- }
-
- *data++ = netdev->rx_dropped.counter;
- *data++ = netdev->tx_dropped.counter;
- *data++ = priv->tx_timeout_count;
-
- return data;
-}
-
/* hns3_get_stats - get detail statistics.
* @netdev: net device
* @stats: statistics info.
@@ -574,7 +496,7 @@ static void hns3_get_stats(struct net_device *netdev,
return;
}
- p = hns3_get_netdev_stats(netdev, p);
+ h->ae_algo->ops->update_stats(h, &netdev->stats);
/* get per-queue stats */
p = hns3_get_stats_tqps(h, p);
@@ -965,6 +887,182 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %d! Queue max id=%d\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+			    "rx_usecs(%d) rounded down to %d, because it must be a multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+			    "tx_usecs(%d) rounded down to %d, because it must be a multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+			    "usecs_high(%d) rounded down to %d, because it must be a multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will be changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
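With these ops wired into hns3_ethtool_ops below, the parameters map onto the standard ethtool coalescing interface; an illustrative invocation (device name and values assumed, not from the patch):

	ethtool -C eth0 adaptive-rx off adaptive-tx off rx-usecs 36 tx-usecs 36 rx-usecs-high 48 tx-usecs-high 48

This lands in hns3_set_coalesce(), which validates and rounds the values, then applies them per queue via hns3_set_coalesce_per_queue().
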
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
@@ -978,6 +1076,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
.get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1002,6 +1101,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.nway_reset = hns3_nway_reset,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d7352f5f75c3..27f0ab695f5a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3409,6 +3409,11 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index ad8adfecbb22..2caca9317f8c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -86,8 +86,6 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- /* TSO cmd */
- HCLGEVF_OPC_TSO_GENERIC_CONFIG = 0x0C01,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
@@ -202,12 +200,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
-#define HCLGEVF_TSO_ENABLE_B 0
-struct hclgevf_cfg_tso_status_cmd {
- u8 tso_enable;
- u8 rsv[23];
-};
-
#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 655f522e44aa..3d2bc9a971fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -201,20 +201,6 @@ static int hclge_get_queue_info(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable)
-{
- struct hclgevf_cfg_tso_status_cmd *req;
- struct hclgevf_desc desc;
-
- req = (struct hclgevf_cfg_tso_status_cmd *)desc.data;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG,
- false);
- hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable);
-
- return hclgevf_cmd_send(&hdev->hw, &desc, 1);
-}
-
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -1375,12 +1361,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_config;
}
- ret = hclgevf_enable_tso(hdev, true);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret);
- goto err_config;
- }
-
/* Initialize VF's MTA */
hdev->accept_mta_mc = true;
ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
@@ -1433,6 +1413,35 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_dev->priv = NULL;
}
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = hdev->num_tqps;
+}
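
A small sketch of the max_combined computation above: the VF can use at most one queue pair per RSS slot per TC, capped by the number of TQPs it actually owns. The values below are illustrative.

#include <stdio.h>

/* Mirrors min_t(u32, rss_size_max * num_tc, num_tqps) from the hunk
 * above, with plain unsigned ints instead of the kernel types.
 */
static unsigned int max_channels(unsigned int rss_size_max,
				 unsigned int num_tc, unsigned int num_tqps)
{
	unsigned int want = rss_size_max * num_tc;

	return want < num_tqps ? want : num_tqps;
}

int main(void)
{
	/* e.g. 16 RSS entries, 1 TC, 8 TQPs -> 8 combined channels max */
	printf("%u\n", max_channels(16, 1, 8));
	return 0;
}
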
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -1462,6 +1471,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .get_channels = hclgevf_get_channels,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6911b7cc06c5..be2ce8dece4a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -757,6 +757,12 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (adapter->renegotiate);
+ /* handle pending MAC address changes after successful login */
+ if (adapter->mac_change_pending) {
+ __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+ adapter->mac_change_pending = false;
+ }
+
return 0;
}
@@ -994,11 +1000,6 @@ static int ibmvnic_open(struct net_device *netdev)
mutex_lock(&adapter->reset_lock);
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@@ -1279,6 +1280,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst;
u64 *handle_array;
int index = 0;
+ u8 proto = 0;
int ret = 0;
if (adapter->resetting) {
@@ -1367,17 +1369,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (skb->protocol == htons(ETH_P_IP)) {
- if (ip_hdr(skb)->version == 4)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
- else if (ip_hdr(skb)->version == 6)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
- else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+ proto = ip_hdr(skb)->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+ proto = ipv6_hdr(skb)->nexthdr;
}
+ if (proto == IPPROTO_TCP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+ else if (proto == IPPROTO_UDP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2;
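
A userspace sketch of the corrected protocol-flag logic in the hunk above: pick the L4 protocol from the header matching the ethertype, then derive the TCP/UDP flag from that single value. The old code read ip_hdr() even for IPv6 frames, and its UDP branch (protocol != IPPROTO_TCP) could mislabel non-UDP traffic. The flag bit values here are illustrative, not the real IBMVNIC_TX_PROT_* encodings.

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define IPPROTO_TCP	6
#define IPPROTO_UDP	17

/* Hypothetical flag bits standing in for IBMVNIC_TX_PROT_* */
#define FLAG_IPV4	0x01
#define FLAG_IPV6	0x02
#define FLAG_TCP	0x04
#define FLAG_UDP	0x08

static uint8_t tx_flags(uint16_t ethertype, uint8_t l4_proto)
{
	uint8_t flags = 0;

	/* L3: choose exactly one of IPv4/IPv6 from the ethertype */
	if (ethertype == ETH_P_IP)
		flags |= FLAG_IPV4;
	else if (ethertype == ETH_P_IPV6)
		flags |= FLAG_IPV6;

	/* L4: set TCP or UDP only when the protocol really matches */
	if (l4_proto == IPPROTO_TCP)
		flags |= FLAG_TCP;
	else if (l4_proto == IPPROTO_UDP)
		flags |= FLAG_UDP;

	return flags;
}

int main(void)
{
	printf("0x%02x\n", tx_flags(ETH_P_IPV6, IPPROTO_TCP));	/* 0x06 */
	return 0;
}
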
@@ -1532,7 +1535,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- if (adapter->state != VNIC_OPEN) {
+ if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
adapter->mac_change_pending = true;
return 0;
@@ -2453,6 +2456,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
struct ibmvnic_sub_crq_queue *scrq = instance;
struct ibmvnic_adapter *adapter = scrq->adapter;
+ /* When booting a kdump kernel we can hit pending interrupts
+ * prior to completing driver initialization.
+ */
+ if (unlikely(adapter->state != VNIC_OPEN))
+ return IRQ_NONE;
+
adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
@@ -3350,7 +3359,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
return;
}
+ adapter->ip_offload_ctrl.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+ adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+ adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 538b42d5c187..8e12aae065d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
+ /* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
- return;
-
- /* update MACVLAN statistics */
- macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
- !!(rx_desc->w.hdr_info &
- cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
/**
@@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
if (rx_desc->w.vlan) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
return err;
}
-#ifdef CONFIG_PM
/**
* fm10k_resume - Generic PM resume hook
* @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
-
/**
* fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &fm10k_pm_ops,
},
-#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
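
A kernel-style sketch of the pattern the fm10k hunks above adopt (names here are illustrative): mark the PM callbacks __maybe_unused instead of fencing them with #ifdef CONFIG_PM, and let the PM-ops macro drop the references when sleep support is compiled out. Without the annotation, the callbacks would trigger -Wunused-function on !CONFIG_PM_SLEEP builds.

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int __maybe_unused example_resume(struct device *dev)
{
	return 0;	/* re-arm the device here */
}

/* Expands to empty ops when CONFIG_PM_SLEEP is off, discarding the
 * only references to the two callbacks above.
 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
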
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..c5776340517c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
@@ -1594,7 +1594,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1605,7 +1605,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1614,8 +1614,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..40c5f7628aa1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -5236,7 +5236,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -5246,7 +5246,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5255,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5271,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5282,14 +5282,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -5364,11 +5364,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5373,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -5397,7 +5392,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5434,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index cfd788b4fd7a..34173f821fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+ I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+ I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+ I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
I40E_PF_STAT("rx_size_64", stats.rx_size_64),
I40E_PF_STAT("rx_size_127", stats.rx_size_127),
I40E_PF_STAT("rx_size_255", stats.rx_size_255),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 42dcaefc4c19..2ab22eba0c7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..425713fb72e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -389,7 +389,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code;
- u16 read_size = *words;
+ u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..b3cc89cc3a86 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -343,6 +343,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
@@ -400,13 +431,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..5a708c363d99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1502,19 +1502,19 @@ struct i40e_lldp_variables {
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1526,16 +1526,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1545,8 +1545,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1573,11 +1573,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)ls->link_speed;
+ i40e_virtchnl_link_speed(ls->link_speed);
}
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..435a112d09f5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
@@ -1562,7 +1562,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1573,7 +1573,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1582,8 +1582,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..a94648429a5b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1202,7 +1202,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
}
/**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -1212,7 +1212,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1221,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1237,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -1248,16 +1248,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1330,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1339,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -1363,7 +1358,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1400,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..6afc31616e04 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1422,19 +1422,19 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1446,16 +1446,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1465,8 +1465,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1493,11 +1493,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..47040ab2e298 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -199,6 +199,9 @@ struct i40evf_adapter {
wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
int num_req_queues;
@@ -206,7 +209,6 @@ struct i40evf_adapter {
/* TX */
struct i40e_ring *tx_rings;
u32 tx_timeout_count;
- struct list_head mac_filter_list;
u32 tx_desc_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..f92587aba3c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
- }
- }
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
- struct i40e_hw *hw = &adapter->hw;
- int i;
- u32 dyn_ctl;
-
- if (mask & 1) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
- }
- for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i)) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct net_device *netdev = data;
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
- u32 val;
/* handle non-queue interrupts, these reads clear the registers */
- val = rd32(hw, I40E_VFINT_ICR01);
- val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- val = rd32(hw, I40E_VFINT_DYN_CTL01) |
- I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, val);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* @adapter: board private structure
* @vlan: vlan tag
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f = NULL;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- goto out;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
clearout:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -768,21 +727,16 @@ out:
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
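
A kernel-style sketch of the locking conversion running through these i40evf hunks (types and names are illustrative): the old open-coded bit-lock spun with udelay() and gave up after 50 tries, silently skipping the filter update; a dedicated spinlock taken with spin_lock_bh() makes list access unconditional and safe against bottom-half context.

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_filter {
	struct list_head list;
	bool remove;
};

struct example_adapter {
	spinlock_t mac_vlan_list_lock;	/* protects both filter lists */
	struct list_head mac_filter_list;
};

static void example_mark_all_for_removal(struct example_adapter *adapter)
{
	struct example_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry(f, &adapter->mac_filter_list, list)
		f->remove = true;	/* flagged for a later AQ request */
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
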
/**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* @adapter: board private structure
* @macaddr: the MAC address
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
- int count = 50;
if (!macaddr)
return NULL;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return NULL;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f) {
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- return NULL;
- }
+ if (!f)
+ goto clearout;
ether_addr_copy(f->macaddr, macaddr);
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
- int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
i40evf_add_filter(adapter, mca->addr);
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0) {
- dev_err(&adapter->pdev->dev,
- "Failed to get lock in %s\n", __func__);
- return;
- }
- }
- /* remove filter if not in netdev list */
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
/**
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
void i40evf_down(struct i40evf_adapter *adapter)
{
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
- usleep_range(500, 1000);
-
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->remove = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1770,13 +1716,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING) {
- i40evf_irq_enable_queues(adapter, ~0);
- i40evf_fire_sw_int(adapter, 0xFF);
- } else {
- i40evf_fire_sw_int(adapter, 0x1);
- }
-
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1735,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ if (adapter->state == __I40EVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -1808,6 +1751,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
i40evf_free_all_rx_resources(adapter);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
@@ -1819,6 +1764,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
kfree(fv);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1854,6 +1801,7 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
+ bool running;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
@@ -1913,7 +1861,13 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ running = (adapter->state == __I40EVF_RUNNING);
+
+ if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1948,6 +1902,8 @@ continue_reset:
adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -1956,15 +1912,19 @@ continue_reset:
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
vlf->add = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
- if (netif_running(adapter->netdev)) {
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+ */
+ if (running) {
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
if (err)
@@ -1993,9 +1953,13 @@ continue_reset:
adapter->state = __I40EVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -2239,8 +2203,14 @@ static int i40evf_open(struct net_device *netdev)
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
- return -EBUSY;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ if (adapter->state != __I40EVF_DOWN) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2234,8 @@ static int i40evf_open(struct net_device *netdev)
i40evf_irq_enable(adapter, true);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
return 0;
err_req_irq:
@@ -2273,6 +2245,8 @@ err_setup_rx:
i40evf_free_all_rx_resources(adapter);
err_setup_tx:
i40evf_free_all_tx_resources(adapter);
+err_unlock:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
@@ -2296,6 +2270,9 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2282,8 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2943,6 +2922,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
+ spin_lock_init(&adapter->mac_vlan_list_lock);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2985,6 +2966,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
if (netif_running(netdev)) {
rtnl_lock();
i40evf_down(adapter);
@@ -2993,6 +2978,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
retval = pci_save_state(pdev);
if (retval)
return retval;
@@ -3118,6 +3105,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
@@ -3130,6 +3118,8 @@ static void i40evf_remove(struct pci_dev *pdev)
kfree(f);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..feb95b62a077 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
}
if (!flags) {
- adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+ I40EVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+ I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8611763d6129..49ab0c7a9cd5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -333,7 +333,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
- struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -397,8 +396,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -676,6 +674,7 @@ struct ixgbe_adapter {
struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 hw_tcs;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
@@ -723,8 +722,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
- u16 eeprom_verh;
- u16 eeprom_verl;
+ char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -768,7 +766,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+ /* Bitmask indicating in-use pools */
+ DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
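
A sketch of the bitmap-based pool accounting the hunk above moves to (names illustrative): with IXGBE_MAX_MACVLANS raised to 63, tracking 63 macvlan pools plus the default pool needs 64 bits, which DECLARE_BITMAP() sizes correctly even on 32-bit kernels where a bare unsigned long holds only 32.

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX_MACVLANS	63

struct example_pools {
	DECLARE_BITMAP(fwd_bitmask, EXAMPLE_MAX_MACVLANS + 1);
};

/* find_first_zero_bit()/set_bit() is the usual allocation idiom */
static int example_claim_pool(struct example_pools *p)
{
	unsigned int pool;

	pool = find_first_zero_bit(p->fwd_bitmask, EXAMPLE_MAX_MACVLANS + 1);
	if (pool > EXAMPLE_MAX_MACVLANS)
		return -1;	/* all pools in use */

	set_bit(pool, p->fwd_bitmask);
	return pool;
}
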
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 8a32eb7d47b9..a0ebd9ecf243 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index d602637ccc40..4dfc81dbee4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -1310,10 +1310,11 @@ do { \
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
*
* This function is almost identical to the function above but contains
- * several optomizations such as unwinding all of the loops, letting the
+ * several optimizations such as unwinding all of the loops, letting the
* compiler work out all of the conditional ifs since the keys are static
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
@@ -1446,7 +1447,7 @@ do { \
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applies the input_mask
@@ -2078,6 +2079,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2131,6 +2133,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..7ac7ef9b37ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
@@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
@@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ * vlanid not found
*
* return the VLVF index where this VLAN id should be placed
*
@@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for VLAN anti-spoofing
- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
*
**/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set to true;
+ * otherwise nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* Option ROM may or may not be present. Start with the pointer */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* return if the option ROM block is absent or invalid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
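A worked example of the option ROM field packing above, assuming NVM_OROM_SHIFT is 8 and NVM_OROM_PATCH_MASK is 0xFF (taken from the driver header, which this diff does not show):

    /* Userspace sketch of ixgbe_get_orom_version()'s unpacking. */
    #include <stdio.h>
    #include <stdint.h>

    #define NVM_OROM_SHIFT      8    /* assumed */
    #define NVM_OROM_PATCH_MASK 0xFF /* assumed */

    int main(void)
    {
        uint16_t blkl = 0x0512, blkh = 0x3407;
        uint16_t major = blkl >> NVM_OROM_SHIFT;
        uint16_t build = (uint16_t)(blkl << NVM_OROM_SHIFT) |
                         (blkh >> NVM_OROM_SHIFT);
        uint16_t patch = blkh & NVM_OROM_PATCH_MASK;

        printf("%x.%x.%x\n", major, build, patch); /* -> 5.1234.7 */
        return 0;
    }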
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set to true;
+ * otherwise nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if offset to OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
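The word-order selection in ixgbe_get_etk_id() can be checked with a short sketch; NVM_ETK_SHIFT of 16 and NVM_ETK_VALID as the bit-15 mask of the high word are assumptions based on the driver header:

    /* Userspace sketch: combine two EEPROM words into the 32-bit ID. */
    #include <stdio.h>
    #include <stdint.h>

    #define NVM_ETK_SHIFT 16     /* assumed */
    #define NVM_ETK_VALID 0x8000 /* assumed */

    static uint32_t etk_id(uint16_t lo, uint16_t hi)
    {
        /* high word bit 15 picks which word supplies the upper half */
        if (!(hi & NVM_ETK_VALID))
            return (uint32_t)hi | ((uint32_t)lo << NVM_ETK_SHIFT);
        return (uint32_t)lo | ((uint32_t)hi << NVM_ETK_SHIFT);
    }

    int main(void)
    {
        printf("0x%08x\n", etk_id(0x0d2f, 0x8000)); /* -> 0x80000d2f */
        return 0;
    }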
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 072ef3b5fc61..aaea8282bfd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -39,6 +39,10 @@
* are the smallest unit programmable into the underlying
* hardware. The IEEE 802.1Qaz specification does not use bandwidth
* groups so this is much simplified from the CEE case.
+ * @bw: bandwidth indexed by traffic class
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @max_frame: maximum frame size
*/
static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
@@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: Maximum frame size
+ * @direction: Configuring either Tx or Rx
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index b79e93a5b699..f94c7e82a30b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -34,7 +34,9 @@
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
@@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
@@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
@@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1011d644978f..1eed6811e914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -38,6 +38,7 @@
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
@@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
@@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 78c52375acc6..b33f3f87e4b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
- if (max_tc != netdev_get_num_tc(dev)) {
+ if (max_tc != adapter->hw_tcs) {
err = ixgbe_setup_tc(dev, max_tc);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 5e2c1e35e517..ad54080488ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
- * @pf: the pf that is stopping
+ * @adapter: the adapter that is exiting
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aaf70b3cfcd..f064099733b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1014,16 +1014,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
- nvm_track_id = (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl;
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- nvm_track_id);
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -3120,7 +3117,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
@@ -3177,7 +3174,7 @@ static void ixgbe_get_channels(struct net_device *dev,
return;
/* same thing goes for being DCB enabled */
- if (netdev_get_num_tc(dev) > 1)
+ if (adapter->hw_tcs > 1)
return;
/* if ATR is disabled we can exit */
@@ -3223,7 +3220,7 @@ static int ixgbe_set_channels(struct net_device *dev,
#endif
/* use setup TC to update any traffic class queue mapping */
- return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
static int ixgbe_get_module_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..7a09a40e4472 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -150,6 +150,7 @@ skip_ddpinv:
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
+ * @target_mode: 1 to setup target mode, 0 to setup initiator mode
*
* Returns : 1 for success and 0 for no ddp
*/
@@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
- snprintf(info->firmware_version,
- sizeof(info->firmware_version),
- "0x%08x",
- (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl);
+ strlcpy(info->firmware_version, adapter->eeprom_id,
+ sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/**
* ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* Return : TC that FCoE is mapped to
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..b3c282d09b18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
- u16 reg_idx;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u16 reg_idx, pool;
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= tcs)
+ if ((reg_idx & ~vmdq->mask) >= tcs) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
reg_idx++;
}
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
- struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
*tx = 0;
*rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
**/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
- struct net_device *dev = adapter->netdev;
+ u8 num_tcs = adapter->hw_tcs;
unsigned int tx_idx, rx_idx;
int tc, offset, rss_i, i;
- u8 num_tcs = netdev_get_num_tc(dev);
/* verify we have DCB queueing enabled before proceeding */
if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
adapter->tx_ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->rx_ring[offset + i]->netdev = adapter->netdev;
adapter->tx_ring[offset + i]->dcb_tc = tc;
adapter->rx_ring[offset + i]->dcb_tc = tc;
}
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+ u16 reg_idx, pool;
int i;
- u16 reg_idx;
/* only proceed if VMDq is enabled */
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
break;
#endif
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
#ifdef IXGBE_FCOE
/* FCoE uses a linear block of queues so just assigning 1:1 */
- for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ for (; i < adapter->num_rx_queues; i++, reg_idx++) {
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
#endif
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
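In these ring-caching hunks, __ALIGN_MASK(x, mask) is the kernel's round-up idiom, (((x) + (mask)) & ~(mask)); called with ~vmdq->mask it bumps reg_idx to the next pool boundary once the within-pool index reaches the usable ring count. A simplified sketch with assumed sizes (8 queues reserved per pool, 4 in use):

    /* Userspace sketch of the reg_idx/pool stepping; real vmdq->mask
     * values are hardware-specific register constants.
     */
    #include <stdio.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        unsigned int mask = ~0x7u; /* 8-queue pools, assumed */
        unsigned int used = 4;     /* rings actually used per pool */
        unsigned int reg_idx = 0, pool = 0;

        for (int i = 0; i < 12; i++, reg_idx++) {
            if ((reg_idx & ~mask) >= used) { /* past pool's rings */
                pool++;
                reg_idx = __ALIGN_MASK(reg_idx, ~mask);
            }
            printf("ring %d -> pool %u reg_idx %u\n", i, pool, reg_idx);
        }
        return 0; /* rings 4-7 land on reg_idx 8-11 in pool 1, etc. */
    }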
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i, reg_idx;
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i]->reg_idx = i;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
adapter->tx_ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit VMDq instances on the PF by number of Tx queues */
+ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
int tcs;
/* Map queue offset and counts onto allocated tx queues */
- tcs = netdev_get_num_tc(dev);
+ tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit l2fwd RSS based on total Tx queue limit */
+ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+ if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
#endif
+ /* populate TC0 for use by pool 0 */
+ netdev_set_tc_queue(adapter->netdev, 0,
+ adapter->num_rx_queues_per_pool, 0);
+
return true;
}
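netdev_set_num_tc() and netdev_set_tc_queue() are the stack-side half of this: declaring a single traffic class whose queue range covers only pool 0 keeps the other pools' queues out of generic queue selection. An illustrative fragment, not driver code:

    /* Sketch: restrict stack queue selection to pool 0's queues.
     * Assumes <linux/netdevice.h>; error handling elided.
     */
    static void example_restrict_to_pool0(struct net_device *dev,
                                          u16 rxq_per_pool)
    {
        netdev_set_num_tc(dev, 1);                    /* one TC...       */
        netdev_set_tc_queue(dev, 0, rxq_per_pool, 0); /* ...queues [0,n) */
    }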
@@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
- adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
@@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
@@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- txr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = txr_idx;
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- rxr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = rxr_idx;
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
@@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
*/
/* Disable DCB unless we only have a single traffic class */
- if (netdev_get_num_tc(adapter->netdev) > 1) {
+ if (adapter->hw_tcs > 1) {
e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
@@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.pfc_mode_enable = false;
}
+ adapter->hw_tcs = 0;
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 95aba975b391..4f28621b76e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
- u32 head, tail;
+ unsigned int head, tail;
- if (ring->l2_accel_priv)
- adapter = ring->l2_accel_priv->real_adapter;
- else
- adapter = netdev_priv(ring->netdev);
-
- hw = &adapter->hw;
- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
+ return ((head <= tail) ? tail : tail + ring->count) - head;
}
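The rewritten helper is plain circular-buffer occupancy computed from the software pointers, avoiding two MMIO reads of TDH/TDT per check: when head has not passed tail the pending count is tail - head, otherwise the distance wraps through ring->count. A standalone check of the formula:

    /* Userspace check of the Tx-pending arithmetic used above. */
    #include <stdio.h>

    static unsigned int tx_pending(unsigned int head, unsigned int tail,
                                   unsigned int count)
    {
        return ((head <= tail) ? tail : tail + count) - head;
    }

    int main(void)
    {
        printf("%u\n", tx_pending(10, 14, 512)); /* -> 4, no wrap */
        printf("%u\n", tx_pending(508, 4, 512)); /* -> 8, wrapped */
        printf("%u\n", tx_pending(7, 7, 512));   /* -> 0, idle    */
        return 0;
    }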
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate
**/
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
@@ -1754,9 +1752,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
skb->protocol = eth_type_trans(skb, dev);
+
+ /* record Rx queue, or update MACVLAN statistics */
+ if (netif_is_ixgbe(dev))
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
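For reference, macvlan_count_rx() is a static inline from include/linux/if_macvlan.h with this shape (as of this series); the driver passes skb->len + ETH_HLEN because eth_type_trans() has already pulled the Ethernet header and the macvlan stats should count the full frame:

    void macvlan_count_rx(const struct macvlan_dev *vlan, unsigned int len,
                          bool success, bool multicast);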
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1921,10 +1925,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
if (IS_ERR(skb))
return true;
- /* verify that the packet does not have any known errors */
- if (unlikely(ixgbe_test_staterr(rx_desc,
- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
- !(netdev->features & NETIF_F_RXALL))) {
+ /* Verify netdev is present, and that packet does not have any
+ * errors that would be unacceptable to the netdev.
+ */
+ if (!netdev ||
+ (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
@@ -2021,8 +2028,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of data in rx_buffer
*
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
@@ -2517,13 +2524,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
@@ -2536,8 +2536,6 @@ enum latency_range {
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see ixgbe_param.c)
**/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
@@ -3022,6 +3020,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
@@ -3477,6 +3477,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
*
**/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
@@ -3588,7 +3589,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs, mtqc;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -3855,16 +3856,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
- unsigned int pf_pool = adapter->num_vfs;
/* Write redirection table to HW */
for (i = 0; i < reta_entries; i++) {
+ u16 pool = adapter->num_rx_pools;
+
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
- if ((i & 3) == 3) {
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ if ((i & 3) != 3)
+ continue;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
- vfreta = 0;
- }
+ vfreta = 0;
}
}
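Each 32-bit PFVFRETA register packs four one-byte redirection entries, so the loop accumulates a byte per iteration and flushes every fourth one to each active pool. A userspace sketch of the packing:

    /* Sketch: pack 4 RETA bytes per 32-bit register write. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
        uint32_t vfreta = 0;

        for (unsigned int i = 0; i < 8; i++) {
            vfreta |= (uint32_t)tbl[i] << (i & 0x3) * 8;
            if ((i & 3) != 3)
                continue;
            /* the driver writes this per pool via IXGBE_PFVFRETA() */
            printf("reg %u = 0x%08x\n", i >> 2, vfreta);
            vfreta = 0;
        }
        return 0; /* prints reg 0 = 0x03020100, reg 1 = 0x03020100 */
    }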
@@ -3901,13 +3906,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- *(adapter->rss_key + i));
+ for (i = 0; i < 10; i++) {
+ u16 pool = adapter->num_rx_pools;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+ *(adapter->rss_key + i));
+ }
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3935,7 +3944,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_RSS].mask)
mrqc = IXGBE_MRQC_RSSEN;
} else {
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
if (tcs > 4)
@@ -3973,7 +3982,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
- unsigned int pf_pool = adapter->num_vfs;
+ u16 pool = adapter->num_rx_pools;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3983,7 +3992,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFMRQC(VMDQ_P(pool)),
+ vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
@@ -3993,8 +4006,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
- * @adapter: address of board private structure
- * @index: index of ring to set
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
**/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
@@ -4146,7 +4159,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- u16 pool;
+ u16 pool = adapter->num_rx_pools;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4163,7 +4176,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1u << 29;
- for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4490,8 +4503,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4527,8 +4541,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
@@ -4848,9 +4863,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
+
/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
+ * @vfn: pool to associate with unicast addresses
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
@@ -5197,7 +5214,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_tc = netdev_get_num_tc(adapter->netdev);
+ int num_tc = adapter->hw_tcs;
int i;
if (!num_tc)
@@ -5220,7 +5237,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int hdrm;
- u8 tc = netdev_get_num_tc(adapter->netdev);
+ u8 tc = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -5279,29 +5296,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int rss_i = adapter->num_rx_queues_per_pool;
- struct ixgbe_hw *hw = &adapter->hw;
- u16 pool = vadapter->pool;
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_L2HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (rss_i > 3)
- psrtype |= 2u << 29;
- else if (rss_i > 1)
- psrtype |= 1u << 29;
-
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5354,96 +5348,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
- struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
- /* shutdown specific queue receive and wait for dma to settle */
- ixgbe_disable_rx_queue(adapter, rx_ring);
- usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
- ixgbe_clean_rx_ring(rx_ring);
- rx_ring->l2_accel_priv = NULL;
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
-{
- struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase = accel->rx_base_queue;
- unsigned int txbase = accel->tx_base_queue;
- int i;
-
- netif_tx_stop_all_queues(vdev);
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
- }
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
- adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
- }
-
-
- return 0;
-}
-
static int ixgbe_fwd_ring_up(struct net_device *vdev,
struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase, txbase, queues;
- int i, baseq, err = 0;
+ int i, baseq, err;
- if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
- netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
- baseq, baseq + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
- accel->rx_base_queue = rxbase = baseq;
- accel->tx_base_queue = txbase = baseq;
+ accel->rx_base_queue = baseq;
+ accel->tx_base_queue = baseq;
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[baseq + i]->netdev = vdev;
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->rx_ring[rxbase + i]->netdev = vdev;
- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
- }
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->netdev = vdev;
- adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ /* ixgbe_add_mac_filter returns an index on success, so we only
+ * need to treat the return value as an error if it is negative.
+ */
+ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+ VMDQ_P(accel->pool));
+ if (err >= 0) {
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return 0;
}
- queues = min_t(unsigned int,
- adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
- err = netif_set_real_num_tx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- err = netif_set_real_num_rx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- if (is_valid_ether_addr(vdev->dev_addr))
- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ adapter->rx_ring[baseq + i]->netdev = NULL;
- ixgbe_fwd_psrtype(accel);
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
- return err;
-fwd_queue_err:
- ixgbe_fwd_ring_down(vdev, accel);
return err;
}
@@ -5919,21 +5862,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
-
- return 0;
-}
-
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -5963,10 +5891,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- /* disable any upper devices */
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_disable_macvlan, NULL);
-
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -6121,6 +6045,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
+ * @ii: pointer to ixgbe_info for device
*
* ixgbe_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
@@ -6155,6 +6080,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -6304,7 +6230,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
}
/* PF holds first pool slot */
- set_bit(0, &adapter->fwd_bitmask);
+ set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -6404,6 +6330,7 @@ err_setup_tx:
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: pointer to ixgbe_adapter
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
@@ -6654,20 +6581,12 @@ int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- if (adapter->num_rx_pools > 1)
- queues = adapter->num_rx_queues_per_pool;
- else
- queues = adapter->num_tx_queues;
-
+ queues = adapter->num_tx_queues;
err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
- if (adapter->num_rx_pools > 1 &&
- adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
- queues = IXGBE_MAX_L2A_QUEUES;
- else
- queues = adapter->num_rx_queues;
+ queues = adapter->num_rx_queues;
err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6791,7 +6710,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, fctrl;
+ u32 ctrl;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
@@ -6816,18 +6735,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
+ u32 fctrl;
+
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & IXGBE_WUFC_MC) {
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
+ /* enable the reception of multicast packets */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7224,7 +7143,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
* @adapter: pointer to the device adapter structure
- * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -7281,18 +7199,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
-
- return 0;
-}
-
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -7373,12 +7279,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
- /* enable any upper devices */
- rtnl_lock();
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_enable_macvlan, NULL);
- rtnl_unlock();
-
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -7663,6 +7563,7 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 cap_speed;
u32 speed;
bool autoneg = false;
@@ -7675,16 +7576,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
- hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
- /* setup the highest link when no autoneg */
- if (!autoneg) {
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- speed = IXGBE_LINK_SPEED_10GB_FULL;
- }
- }
+ /* advertise highest capable link speed */
+ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -7696,7 +7595,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list structure
**/
static void ixgbe_service_timer(struct timer_list *t)
{
@@ -8340,14 +8239,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
- struct ixgbe_ring_feature *f;
int txq;
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *f;
#endif
- if (fwd_adapter)
- return skb->queue_mapping + fwd_adapter->tx_base_queue;
+ if (fwd_adapter) {
+ adapter = netdev_priv(dev);
+ txq = reciprocal_scale(skb_get_hash(skb),
+ adapter->num_rx_queues_per_pool);
+
+ return txq + fwd_adapter->tx_base_queue;
+ }
#ifdef IXGBE_FCOE
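reciprocal_scale() (linux/kernel.h) maps a 32-bit value onto [0, n) without a divide: ((u64)val * n) >> 32. The fwd_adapter branch above uses it to spread flows across the pool's queues by skb hash. A sketch with example values:

    /* Userspace sketch of the hash -> per-pool Tx queue mapping. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t queues_per_pool = 4, tx_base = 8; /* example values */
        uint32_t hash = 0xdeadbeef;    /* stand-in for skb_get_hash() */

        printf("queue %u\n",
               reciprocal_scale(hash, queues_per_pool) + tx_base);
        return 0; /* -> queue 11 */
    }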
@@ -8679,7 +8583,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
/**
* ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8703,7 +8607,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
/**
* ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8870,14 +8774,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
- * @netdev: net device to configure
+ * @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8886,10 +8789,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
- return -EBUSY;
-
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -8906,6 +8805,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
+ adapter->hw_tcs = tc;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
@@ -8915,10 +8815,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
} else {
netdev_reset_tc(dev);
+ /* To support macvlan offload we have to use num_tc to
+ * restrict the queues that can be used by the device.
+ * By doing this we can avoid reporting a false number of
+ * queues.
+ */
+ if (!tc && adapter->num_rx_pools > 1)
+ netdev_set_num_tc(dev, 1);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->hw_tcs = tc;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -9052,6 +8961,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
struct upper_walk_data data;
struct net_device *upper;
@@ -9060,11 +8970,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
for (vf = 0; vf < num_vfs; ++vf) {
upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
if (upper->ifindex == ifindex) {
- if (adapter->num_rx_pools > 1)
- *queue = vf * 2;
- else
- *queue = vf * adapter->num_rx_queues_per_pool;
-
+ *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
*action = vf + 1;
*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
return 0;
@@ -9454,7 +9360,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
rtnl_lock();
- ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ ixgbe_setup_tc(netdev, adapter->hw_tcs);
rtnl_unlock();
}
@@ -9530,7 +9436,7 @@ static int ixgbe_set_features(struct net_device *netdev,
/* We cannot enable ATR if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
/* We cannot enable ATR if we have 2 or more tcs */
- (netdev_get_num_tc(netdev) > 1) ||
+ (adapter->hw_tcs > 1) ||
/* We cannot enable ATR if RSS is disabled */
(adapter->ring_feature[RING_F_RSS].limit <= 1) ||
/* A sample rate of 0 indicates ATR disabled */
@@ -9705,8 +9611,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/**
* ixgbe_configure_bridge_mode - set various bridge modes
- * @adapter - the private structure
- * @mode - requested bridge mode
+ * @adapter: the private structure
+ * @mode: requested bridge mode
*
* Configure some settings required for various bridge modes.
**/
@@ -9831,6 +9737,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ int tcs = adapter->hw_tcs ? : 1;
unsigned int limit;
int pool, err;
@@ -9841,24 +9748,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EINVAL);
-#ifdef CONFIG_RPS
- if (vdev->num_rx_queues != vdev->num_tx_queues) {
- netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
- vdev->name);
- return ERR_PTR(-EINVAL);
- }
-#endif
- /* Check for hardware restriction on number of rx/tx queues */
- if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
- vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
- netdev_info(pdev,
- "%s: Supports RX/TX Queue counts 1,2, and 4\n",
- pdev->name);
- return ERR_PTR(-EINVAL);
- }
-
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
@@ -9866,53 +9757,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
- adapter->num_rx_pools++;
- set_bit(pool, &adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ set_bit(pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- if (err)
- goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- if (netif_running(pdev)) {
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+ if (!err && netif_running(pdev))
err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
- }
- return fwd_adapter;
-fwd_add_err:
+ if (!err)
+ return fwd_adapter;
+
/* unwind pool allocation and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
- unsigned int limit;
+ struct ixgbe_fwd_adapter *accel = priv;
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int limit, i;
- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ /* delete unicast filter associated with offloaded interface */
+ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+ VMDQ_P(accel->pool));
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ /* disable ability to receive packets for this pool */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
+
+ /* Allow remaining Rx packets to get flushed out of the
+ * Rx FIFO before we drop the netdev for the ring.
+ */
+ usleep_range(10000, 20000);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+ struct ixgbe_q_vector *qv = ring->q_vector;
+
+ /* Make sure we aren't processing any packets and clear
+ * netdev to shut down the ring.
+ */
+ if (netif_running(adapter->netdev))
+ napi_synchronize(&qv->napi);
+ ring->netdev = NULL;
+ }
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
@@ -9924,13 +9830,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
adapter->ring_feature[RING_F_RSS].limit = rss;
}
- ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
- fwd_adapter->pool, adapter->num_rx_pools,
- fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
- kfree(fwd_adapter);
+ ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
+ accel->pool, adapter->num_rx_pools,
+ accel->rx_base_queue,
+ accel->rx_base_queue +
+ adapter->num_rx_queues_per_pool);
+ kfree(accel);
}
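The teardown order above is the usual quiesce-before-unpublish pattern: remove the MAC filter, silence the pool via VMOLR, let the Rx FIFO drain, then synchronize NAPI on each ring before clearing the netdev pointer the Rx path dereferences. Condensed as an illustrative fragment, not driver code:

    /* Sketch: a ring must be unreachable and its poller finished
     * before the pointer it dereferences can be cleared.
     */
    static void example_quiesce_ring(struct ixgbe_adapter *adapter,
                                     struct ixgbe_ring *ring)
    {
        if (netif_running(adapter->netdev))
            napi_synchronize(&ring->q_vector->napi); /* wait out poll */
        ring->netdev = NULL; /* now safe: nothing is mid-poll */
    }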
#define IXGBE_MAX_MAC_HDR_LEN 127
@@ -9997,7 +9903,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (!!prog != !!old_prog) {
- int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
@@ -10174,7 +10080,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* ixgbe_wol_supported - Check whether device supports WoL
* @adapter: the adapter private structure
* @device_id: the device ID
- * @subdev_id: the subsystem device ID
+ * @subdevice_id: the subsystem device ID
*
* This function is used by probe and ethtool to determine
* which devices have WoL support
@@ -10243,6 +10149,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version to
+ * format and display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ ixgbe_get_orom_version(hw, &nvm_ver);
+
+ if (nvm_ver.or_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+ nvm_ver.or_build, nvm_ver.or_patch);
+ return;
+ }
+
+ /* Set ETrack ID format */
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", nvm_ver.etk_id);
+}
+
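
For reference, the three branches format adapter->eeprom_id into one of these shapes; the sample values below are invented, not read from hardware:

#include <stdio.h>

int main(void)
{
	char id[32];	/* mirrors adapter->eeprom_id (NVM_VER_SIZE) */

	/* OEM product version block valid: "major.minor.release" */
	snprintf(id, sizeof(id), "%x.%x.%x", 0x1, 0x5, 0x12);
	printf("%s\n", id);			/* -> "1.5.12" */

	/* Option ROM valid: "<etrack>, major.build.patch" */
	snprintf(id, sizeof(id), "0x%08x, %d.%d.%d", 0x800005ab, 15, 0, 2);
	printf("%s\n", id);			/* -> "0x800005ab, 15.0.2" */

	/* fallback: ETrack ID alone */
	snprintf(id, sizeof(id), "0x%08x", 0x800005ab);
	printf("%s\n", id);			/* -> "0x800005ab" */
	return 0;
}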
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10578,8 +10519,7 @@ skip_sriov:
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+ ixgbe_set_fw_version(adapter);
/* pick up the PCI bus settings for reporting later */
if (ixgbe_pcie_from_parent(hw))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 654a402f0e9e..91bde90f9265 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
/**
* ixgbe_get_phy_type_from_id - Get the phy type
- * @hw: pointer to hardware structure
+ * @phy_id: hardware phy id
*
**/
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
@@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: status of link
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
@@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
* @lock: true if we should take and release the semaphore
*
@@ -1804,6 +1810,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
* @lock: true if we should take and release the semaphore
*
@@ -1904,6 +1913,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ae312c45696a..f6cc9166082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -166,7 +166,7 @@
/**
* ixgbe_ptp_setup_sdp_x540
- * @hw: the hardware private structure
+ * @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
* the X540 device. It will create a 1 second periodic output that can
@@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
* ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
* @adapter: private adapter structure
* @hwtstamp: stack timestamp structure
- * @systim: unsigned 64bit system time value
+ * @timestamp: unsigned 64bit system time value
*
* We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
* which can be used by the stack's ptp functions.
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_set_ts_config - user entry point for timestamp mode
* @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
@@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_suspend - stop PTP work items
- * @ adapter: pointer to adapter struct
+ * @adapter: pointer to adapter struct
*
* this function suspends PTP activity, and prevents more PTP work from being
* generated, but does not destroy the PTP clock device.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..27a70a52f3c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-
/* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED;
/* Allocate memory for per VF control structures */
adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
@@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gpie;
- u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
- /* turn off device IOV mode */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
- int err = 0;
- u8 num_tc;
- int i;
int pre_existing_vfs = pci_num_vf(dev);
+ int err = 0, num_rx_pools, i, limit;
+ u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- num_tc = netdev_get_num_tc(adapter->netdev);
-
- if (num_tc > 4) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
- return -EPERM;
- }
- } else if ((num_tc > 1) && (num_tc <= 4)) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
- return -EPERM;
- }
- } else {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
- return -EPERM;
- }
+ num_tc = adapter->hw_tcs;
+ num_rx_pools = adapter->num_rx_pools;
+ limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+ (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
+
+ if (num_vfs > (limit - num_rx_pools)) {
+ e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+ num_tc, num_rx_pools - 1, limit - num_rx_pools);
+ return -EPERM;
}
err = __ixgbe_enable_sriov(adapter, num_vfs);
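
The collapsed ternary above replaces the three removed branches: the per-TC pool limit bounds how many VFs may coexist with offloaded macvlan pools. A standalone sketch of the budget computation, using assumed limit values (64/32/16); the real numbers are the IXGBE_MAX_VFS_{1,4,8}TC defines in ixgbe_sriov.h:

#include <stdio.h>

/* assumed values; see IXGBE_MAX_VFS_{1,4,8}TC in ixgbe_sriov.h */
#define MAX_VFS_1TC	64
#define MAX_VFS_4TC	32
#define MAX_VFS_8TC	16

int main(void)
{
	int num_rx_pools = 3;	/* e.g. PF pool + 2 offloaded macvlans */
	int num_tc;

	for (num_tc = 1; num_tc <= 8; num_tc *= 2) {
		int limit = (num_tc > 4) ? MAX_VFS_8TC :
			    (num_tc > 1) ? MAX_VFS_4TC : MAX_VFS_1TC;

		printf("%d TC(s): at most %d VFs may be created\n",
		       num_tc, limit - num_rx_pools);
	}
	return 0;
}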
@@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+ int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
- if (!err && current_flags != adapter->flags)
+ if (!err && (current_flags != adapter->flags ||
+ prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
@@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+ u8 num_tcs = adapter->hw_tcs;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
{
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
struct net_device *dev = adapter->netdev;
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int default_tc = 0;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..21eb79ae3c30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
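
The NVM_ETK_* defines above describe the ETrack layout: two 16-bit words at offsets 0x2D/0x2E, with bit 15 (NVM_ETK_VALID) of the high word flagging the word order. A user-space sketch of one plausible assembly; read_nvm_word() is a hypothetical stand-in for hw->eeprom.ops.read(), and ixgbe_get_etk_id() in the driver is the authoritative logic:

#include <stdint.h>
#include <stdio.h>

#define NVM_ETK_OFF_LOW	0x2D
#define NVM_ETK_OFF_HI	0x2E
#define NVM_ETK_SHIFT	16
#define NVM_ETK_VALID	0x8000

/* hypothetical stand-in for hw->eeprom.ops.read(hw, offset, &word) */
static uint16_t read_nvm_word(uint16_t offset)
{
	return offset == NVM_ETK_OFF_HI ? 0x8001 : 0x5b1c;	/* canned data */
}

int main(void)
{
	uint16_t low = read_nvm_word(NVM_ETK_OFF_LOW);
	uint16_t hi = read_nvm_word(NVM_ETK_OFF_HI);
	uint32_t etk_id;

	if (hi & NVM_ETK_VALID)		/* marker says: high word really high */
		etk_id = ((uint32_t)hi << NVM_ETK_SHIFT) | low;
	else				/* otherwise treat the words as swapped */
		etk_id = ((uint32_t)low << NVM_ETK_SHIFT) | hi;

	printf("ETrack ID: 0x%08x\n", etk_id);	/* 0x80015b1c */
	return 0;
}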
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cb7da5f9c4da..3bce26e77090 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
}
/**
- * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
- * @hw: pointer to hardware structure
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait_to_complete: unused
*
- * Configures the external PHY and the integrated KR PHY for SFP support.
+ * Configures the external PHY and the integrated KR PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
@@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/**
* ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for native SFP support.
*/
@@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for SFP support.
*/
@@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait: true when waiting for completion is needed
*
* Setup internal/external PHY link speed based on link speed, then set
* external PHY auto advertised link speed.
@@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
*/
static s32
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
@@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
/**
* ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait: true when waiting for completion is needed
*/
static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* ixgbe_read_phy_reg_x550a - Reads specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..ed5c3aea7939 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -554,7 +554,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1896,10 +1895,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
@@ -2745,7 +2740,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list struct
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
@@ -2888,7 +2883,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_subtask - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * @adapter: board private structure
**/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
@@ -4368,6 +4363,7 @@ static void __exit ixgbevf_exit_module(void)
/**
* ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
+ * @hw: pointer to private hardware struct
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c25006ce9af..64c93e8becc6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/**
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
+ * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
- * @adapter: pointer to the port handle
+ * @hw: pointer to hardware structure
* @reta: buffer to fill with RETA contents.
* @num_rx_queues: Number of Rx queues configured for this port
*
@@ -536,6 +537,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @netdev: unused
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -584,6 +587,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -626,6 +631,10 @@ mbx_err:
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
*/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
@@ -655,7 +664,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait_to_complete: unused
*
* Reads the links register to determine if link is up and the current speed
**/
@@ -740,6 +749,10 @@ out:
/**
* Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to private hardware struct
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5299310f2481..7b988595ac5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -227,12 +228,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -243,8 +238,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -254,7 +249,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -473,32 +468,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -559,7 +528,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -659,6 +628,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -864,10 +834,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
@@ -899,7 +865,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -1114,4 +1080,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9bcf38f4123b..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
- int i;
struct ieee_ets ets;
+ int err;
+ int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
@@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
- ets.prio_tc[0] = 1;
- ets.prio_tc[1] = 0;
+ if (ets.ets_cap > 1) {
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ ets.prio_tc[0] = 1;
+ ets.prio_tc[1] = 0;
+ }
- mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ if (err)
+ netdev_err(priv->netdev,
+ "%s, Failed to init ETS: %d\n", __func__, err);
}
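
With the guard above, devices exposing a single traffic class keep the identity map; otherwise priorities 0 and 1 are swapped. A tiny sketch printing the table the code builds (ets_cap is a sample value):

#include <stdio.h>

int main(void)
{
	int ets_cap = 8;		/* sample: 8 traffic classes supported */
	int prio_tc[8];
	int i;

	for (i = 0; i < ets_cap; i++)
		prio_tc[i] = i;		/* default: prio i -> tc i */

	if (ets_cap > 1) {		/* swap prio 0 and 1, as above */
		prio_tc[0] = 1;
		prio_tc[1] = 0;
	}

	for (i = 0; i < ets_cap; i++)
		printf("prio %d -> tc %d\n", i, prio_tc[i]);
	return 0;
}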
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
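
The work item above only applies a profile chosen elsewhere: the sampling and decision logic now live in the shared net_dim library. Roughly, the NAPI path is expected to feed it as below; a kernel-style sketch against the net_dim_sample()/net_dim() helpers from linux/net_dim.h as introduced alongside this series:

#include <linux/net_dim.h>

/* sketch, not a complete driver: feed DIM from the NAPI poll path */
static void rq_feed_dim(struct mlx5e_rq *rq)
{
	struct net_dim_sample sample;

	if (!test_bit(MLX5E_RQ_STATE_AM, &rq->state))
		return;		/* adaptive moderation disabled */

	/* snapshot event/packet/byte counters for the algorithm */
	net_dim_sample(rq->cq.event_ctr, rq->stats.packets,
		       rq->stats.bytes, &sample);

	/* may schedule rq->dim.work (mlx5e_rx_dim_work) with a new profile */
	net_dim(&rq->dim, sample);
}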
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 1554780d1810..2d1395015ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv, true);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -465,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -519,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -527,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 539bd1d24396..466a4e1244d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -582,7 +582,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- if (xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix) < 0)
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
@@ -677,8 +678,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -925,7 +935,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -958,7 +968,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1571,7 +1581,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1753,7 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
@@ -2005,7 +2015,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2675,7 +2685,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2696,7 +2706,6 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3228,12 +3237,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
- netdev->features |= feature; \
+ *features |= feature; \
else \
- netdev->features &= ~feature; \
+ *features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3356,6 +3365,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
@@ -3374,34 +3384,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return err;
}
- MLX5E_SET_FEATURE(netdev, feature, enable);
+ MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t oper_features = netdev->features;
int err;
- err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
- set_feature_lro);
- err |= mlx5e_handle_feature(netdev, features,
+ err = mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_LRO, set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
- set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
- set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
- set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
- set_feature_rx_vlan);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXALL, set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
- set_feature_arfs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_NTUPLE, set_feature_arfs);
#endif
- return err ? -EINVAL : 0;
+ if (err) {
+ netdev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -4047,9 +4063,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4111,7 +4136,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
@@ -4148,6 +4173,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ mlx5e_timestamp_init(priv);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c6a77f8e99a4..10fa6a18fcf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -884,7 +884,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
@@ -934,6 +934,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
+
+ mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 90354e676f0d..ff234dfefc27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index e401d9d245f3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
int err = 0;
/* Temporarily enable local_lb */
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+ if (err)
+ return err;
+
+ if (!lbtp->local_lb) {
+ err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ if (err)
+ return err;
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
- return err;
+ goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
+
+ return 0;
+
+out:
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, false);
- }
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 933275fe03b2..cf528da51243 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -56,12 +56,14 @@ struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
};
struct mlx5e_tc_flow {
@@ -71,6 +73,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +96,25 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ u32 tdn;
+ u32 tirn;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ int peer_ifindex;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +244,187 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ return hp;
+
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ int peer_ifindex)
+{
+ struct mlx5e_hairpin_entry *hpe;
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, peer_ifindex) {
+ if (hpe->peer_ifindex == peer_ifindex)
+ return hpe;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ hpe = mlx5e_hairpin_get(priv, peer_ifindex);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_ifindex = peer_ifindex;
+
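+ /* default to a 2^15 (32KB) hairpin packet buffer, clamped to the device caps below */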
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+ params.q_counter = priv->q_counter;
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n",
+ hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn, params.log_data_size);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_ifindex);
+
+attach_flow:
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ list_add(&flow->hairpin, &hpe->flows);
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
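+ /* the now-empty list head is embedded in the entry, so list_entry() (container_of) recovers it */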
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +432,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +441,33 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ }
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -292,7 +510,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +527,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
@@ -330,6 +551,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1422,6 +1646,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
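+ /* two ports are on the same HW iff their PCI bus number and device slot match */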
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1466,6 +1704,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -2188,6 +2443,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
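+ /* feed this NAPI cycle's event/packet/byte counters to the generic DIM engine */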
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7649e36653d9..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -1123,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1188,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1210,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1262,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1457,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
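+ /* counter allocation failure is not fatal; the drop rule is simply installed uncounted */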
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1483,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1521,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -2016,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
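+ /* seen from the VF, eswitch egress drops are RX drops and ingress drops are TX drops */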
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2076,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3b481182f13a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -73,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -81,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 3e571045626f..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8812d7208e8f..2743525a40a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -86,6 +85,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mlx5i_build_nic_params(mdev, &priv->channels.params);
+ mlx5e_timestamp_init(priv);
+
/* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -396,7 +397,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -450,7 +451,6 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(epriv, false);
mlx5e_activate_priv_channels(epriv);
- mlx5e_timestamp_set(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
@@ -485,7 +485,7 @@ static int mlx5i_close(struct net_device *netdev)
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
- mlx5e_close_channels(&epriv->channels);;
+ mlx5e_close_channels(&epriv->channels);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed62b231..5701f125e99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
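+ /* the EQE carries a raw clock cycle count; convert it via the timecounter before reporting */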
+ ptp_event.index = pin;
+ ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
- ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ ptp_event.pps_times.ts_real =
+ ns_to_timespec64(ptp_event.timestamp);
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8a89c7e8cd63..0f88fd30a09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -319,6 +319,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &priv->eq_table;
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
+ int err;
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_EQ_VEC_COMP_BASE;
@@ -328,21 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
if (!priv->irq_info)
- goto err_free_msix;
+ return -ENOMEM;
nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
PCI_IRQ_MSIX);
- if (nvec < 0)
- return nvec;
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
-err_free_msix:
+err_free_irq_info:
kfree(priv->irq_info);
- return -ENOMEM;
+ return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -578,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int ret = 0;
/* Disable local_lb by default */
- if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(dev, disable_local_lb))
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
@@ -1121,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_poll;
}
- if (boot && mlx5_init_once(dev, priv)) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
+ if (boot) {
+ err = mlx5_init_once(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
+ goto err_stop_poll;
+ }
}
err = mlx5_alloc_irq_vectors(dev);
@@ -1133,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
dev->priv.uar = mlx5_get_uars_page(dev);
- if (!dev->priv.uar) {
+ if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ err = PTR_ERR(dev->priv.uar);
goto err_disable_msix;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..a09ebbaf3b68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,187 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
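+ /* no WQ buffer is programmed for a hairpin RQ; only the HW data buffer size is set */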
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int err;
+
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn);
+ if (err)
+ goto out_err_rq;
+
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn);
+ if (err)
+ goto out_err_sq;
+
+ return 0;
+
+out_err_sq:
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+out_err_rq:
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn);
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
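+ /* the peer SQ is wired up only on the RST->RDY transition */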
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_SQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int err;
+
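+ /* move both queues RST->RDY, telling each side the peer's vhca id and queue number */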
+ /* set peer SQ */
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn,
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn);
+ if (err)
+ goto err_modify_sq;
+
+ /* set func RQ */
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn,
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn);
+
+ if (err)
+ goto err_modify_rq;
+
+ return 0;
+
+err_modify_rq:
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+err_modify_sq:
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ /* unset func RQ */
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQ */
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
struct mlx5_uars_page *ret;
mutex_lock(&mdev->priv.bfregs.reg_head.lock);
- if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
- ret = alloc_uars_page(mdev, false);
- if (IS_ERR(ret)) {
- ret = NULL;
- goto out;
- }
- list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
- } else {
+ if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
+ goto out;
}
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret))
+ goto out;
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b0025b13..a1296a62497d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
void *in;
int err;
- mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+ if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+ !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ return 0;
+
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_mc_local_lb, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
-
- MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_mc_local_lb, 1);
+
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_uc_local_lb, 1);
+
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ if (!err)
+ mlx5_core_dbg(mdev, "%s local_lb\n",
+ enable ? "enable" : "disable");
+
kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+ int err;
+
+ if (!mlxsw_bus->reset)
+ return -EOPNOTSUPP;
+
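+ /* tear the device down and re-probe it on top of the same devlink instance */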
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ mlxsw_bus->reset(mlxsw_core->bus_priv);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
+ if (err)
+ mlxsw_core->reload_fail = true;
+ return err;
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv)
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
- alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
- if (!devlink) {
- err = -ENOMEM;
- goto err_devlink_alloc;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
}
mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = devlink_register(devlink, mlxsw_bus_info->dev);
- if (err)
- goto err_devlink_register;
+ if (!reload) {
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+ }
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- devlink_free(devlink);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+ if (!reload)
+ devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (mlxsw_core->reload_fail)
+ goto reload_fail;
+
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (reload)
+ return;
+reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+ void *bus_priv, bool reload,
+ struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
+ void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
- &mlxsw_i2c_bus, mlxsw_i2c);
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
- mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
struct mlxsw_item {
unsigned short offset; /* bytes in container */
- unsigned short step; /* step in bytes for indexed items */
+ short step; /* step in bytes for indexed items */
unsigned short in_step_offset; /* offset within one step */
unsigned char shift; /* shift in bits */
unsigned char element_size; /* size of element in bit array */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
- u32 single_size, double_size, linear_size;
-
- if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
- return -EIO;
-
- linear_size = profile->kvd_linear_size;
+ u64 single_size, double_size, linear_size;
+ int err;
- /* The hash part is what left of the kvd without the
- * linear part. It is split to the single size and
- * double size by the parts ratio from the profile.
- * Both sizes must be a multiplications of the
- * granularity from the profile.
- */
- double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
- double_size *= profile->kvd_hash_double_parts;
- double_size /= profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts;
- double_size /= profile->kvd_hash_granularity;
- double_size *= profile->kvd_hash_granularity;
- single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
- linear_size;
-
- /* Check results are legal. */
- if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
- double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
- MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
- return -EIO;
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static const struct mlxsw_bus mlxsw_pci_bus = {
- .kind = "pci",
- .init = mlxsw_pci_init,
- .fini = mlxsw_pci_fini,
- .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
- .skb_transmit = mlxsw_pci_skb_transmit,
- .cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
-};
-
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@@ -1643,7 +1614,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
- wmb(); /* reset needs to be written before we read control register */
+ /* Reset needs to be written before we read control register, and
+ * we must wait for the HW to become responsive once again
+ */
+ wmb();
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
@@ -1655,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
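+ /* IRQ vectors must be released before the SW reset and re-allocated once it completes */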
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+ mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
+ .reset = mlxsw_pci_reset,
+};
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1716,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@@ -1725,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
- &mlxsw_pci_bus, mlxsw_pci);
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@@ -1736,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1755,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
- mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
}
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
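+/* The records are laid out from the end of the register backwards, hence the
+ * negative per-record step in the items below.
+ */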
+/* reg_rdpm_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_rdpm_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
MLXSW_REG(ralta),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index aa96cc6a0fa6..7e2b552c2237 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
- .major = MLXSW_FWREV_MAJOR,
- .minor = MLXSW_FWREV_MINOR,
- .subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
- const struct mlxsw_fw_rev *b)
-{
- if (a->major != b->major)
- return a->major > b->major;
- if (a->minor != b->minor)
- return a->minor > b->minor;
- return a->subminor >= b->subminor;
-}
-
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct firmware *firmware;
int err;
- if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ /* Validate driver & FW are compatible */
+ if (rev->major != MLXSW_FWREV_MAJOR) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, MLXSW_FWREV_MAJOR);
+ return -EINVAL;
+ }
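+ /* minor versions within the same hundred form one compatible FW branch */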
+ if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
@@ -1747,72 +1739,186 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f,
- bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+ struct tc_cls_flower_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case TC_CLSFLOWER_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+ mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case TC_CLSFLOWER_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
- if (!tc_can_offload(mlxsw_sp_port->dev))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSMATCHALL:
+ if (!tc_can_offload(mlxsw_sp_port->dev))
+ return -EOPNOTSUPP;
+
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
- ingress);
+ return 0;
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, true);
}
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ case TC_SETUP_CLSFLOWER:
+ if (mlxsw_sp_acl_block_disabled(acl_block))
+ return -EOPNOTSUPP;
+
+ return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb) {
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ if (!acl_block)
+ return -ENOMEM;
+ block_cb = __tcf_block_cb_register(block,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ acl_block = tcf_block_cb_priv(block_cb);
+ }
+ tcf_block_cb_incref(block_cb);
+ err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = acl_block;
+ else
+ mlxsw_sp_port->eg_acl_block = acl_block;
+
+ return 0;
+
+err_block_bind:
+ if (!tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = NULL;
+ else
+ mlxsw_sp_port->eg_acl_block = NULL;
+
+ acl_block = tcf_block_cb_priv(block_cb);
+ err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
{
tc_setup_cb_t *cb;
+ bool ingress;
+ int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_ig;
- else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_eg;
- else
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ ingress = true;
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ ingress = false;
+ } else {
return -EOPNOTSUPP;
+ }
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+ f->block, ingress);
+ if (err) {
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return err;
+ }
+ return 0;
case TC_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+ f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return 0;
default:
@@ -1830,6 +1936,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_RED:
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1840,10 +1948,18 @@ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- if (!enable && (mlxsw_sp_port->acl_rule_count ||
- !list_empty(&mlxsw_sp_port->mall_tc_list))) {
- netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
- return -EINVAL;
+ if (!enable) {
+ if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+ mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+ !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ } else {
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
}
return 0;
}
@@ -3085,6 +3201,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3113,6 +3236,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3148,6 +3273,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3979,6 +4105,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+ u64 size)
+{
+ const struct mlxsw_config_profile *profile;
+
+ profile = &mlxsw_sp_config_profile;
+ if (size % profile->kvd_hash_granularity) {
+ NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+ return false;
+ }
+ return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
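
These validators run when userspace resizes a partition, e.g. via "devlink resource set <dev> path /kvd/linear size <entries>" followed by a driver reload (invocation illustrative). The granularity check rejects any size that is not a whole number of 128-entry blocks; a worked example under assumed values:

    /* Worked example of the granularity check; values hypothetical. */
    static bool size_is_granular(u64 size)
    {
            /* 98304 % 128 == 0  -> accepted (98304 == 768 * 128)
             * 98316 % 128 == 12 -> rejected with -EINVAL
             */
            return size % 128 == 0;
    }
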
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+ .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ /* KVD top resource */
+ mlxsw_sp_kvd_size_params.size_min = kvd_size;
+ mlxsw_sp_kvd_size_params.size_max = kvd_size;
+ mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Linear part init */
+ mlxsw_sp_linear_size_params.size_min = linear_size_min;
+ mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+ double_size_min;
+ mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash double part init */
+ mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+ mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash single part init */
+ mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+ mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
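
The three sub-parts deliberately share one budget: each part's size_max leaves room for the other two parts' minima. A worked example with hypothetical firmware-reported values (the real numbers come from MLXSW_CORE_RES_GET()):

    /* Hypothetical values for illustration only. */
    u32 kvd_size        = 245760;   /* KVD_SIZE */
    u32 single_size_min =  65536;   /* KVD_SINGLE_MIN_SIZE */
    u32 double_size_min =  32768;   /* KVD_DOUBLE_MIN_SIZE */

    /* linear may grow until only the hash minima remain: */
    u32 linear_max = kvd_size - single_size_min - double_size_min; /* 147456 */
    /* hash_double may consume everything but hash_single's minimum
     * (linear_size_min is 0):
     */
    u32 double_max = kvd_size - single_size_min;                   /* 180224 */
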
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ true, kvd_size,
+ MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &mlxsw_sp_kvd_size_params,
+ &mlxsw_sp_resource_kvd_ops);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ false, linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_linear_size_params,
+ &mlxsw_sp_resource_kvd_linear_ops);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ false, double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_double_size_params,
+ &mlxsw_sp_resource_kvd_hash_double_ops);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ false, single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_single_size_params,
+ &mlxsw_sp_resource_kvd_hash_single_ops);
+ if (err)
+ return err;
+
+ return 0;
+}
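
The default hash split here mirrors the fallback computation in mlxsw_sp_kvd_sizes_get() below: whatever the linear part does not use is divided between double and single by the profile's parts ratio, then rounded down to the granularity. A worked example with hypothetical profile values:

    /* Hypothetical profile: double:single parts ratio of 1:2. */
    u32 kvd_size = 245760, linear_size = 98304;
    u32 double_parts = 1, single_parts = 2;

    u32 double_size = (kvd_size - linear_size) * double_parts /
                      (double_parts + single_parts);        /* 49152 */
    double_size = rounddown(double_size, 128);              /* already aligned */
    u32 single_size = kvd_size - double_size - linear_size; /* 98304 */
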
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ /* The hash part is what is left of the kvd without the
+  * linear part. It is split into the single size and
+  * double size according to the parts ratio from the profile.
+  * Both sizes must be multiples of the granularity from the
+  * profile. In case the user provided the sizes, they are
+  * obtained via devlink.
+  */
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ profile->kvd_hash_granularity);
+ }
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check that the results are valid. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
+
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -3998,6 +4371,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 346b8b688b6f..58ff79211c09 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,8 +258,10 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
unsigned acl_rule_count;
+ struct mlxsw_sp_acl_block *ing_acl_block;
+ struct mlxsw_sp_acl_block *eg_acl_block;
};
static inline bool
@@ -459,6 +450,7 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -478,8 +470,11 @@ struct mlxsw_sp_acl_profile_ops {
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct net_device *dev, bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -499,17 +494,34 @@ struct mlxsw_sp_acl_ops {
enum mlxsw_sp_acl_profile profile);
};
+struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
@@ -576,16 +588,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
/* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..9439bfa4ecc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
#include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_ruleset_ht_key {
- struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_acl_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+ return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
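
The rollback uses list_for_each_entry_continue_reverse() to unwind exactly the bindings that were bound before the failure, walking backwards from (and excluding) the entry that failed. The shape of the pattern, reduced to its essentials with generic names:

    list_for_each_entry(pos, &head, list) {
            err = do_bind(pos);
            if (err)
                    goto rollback;
    }
    return 0;

    rollback:
    /* iterates backwards starting from pos->prev, skipping the
     * entry whose do_bind() failed
     */
    list_for_each_entry_continue_reverse(pos, &head, list)
            do_unbind(pos);
    return err;
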
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->mlxsw_sp = mlxsw_sp;
+ return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+ return 0;
+}
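
Taken together, these helpers let one ACL block be bound to any number of {port, direction} pairs: ruleset_zero, once present, is bound to every existing binding, and new bindings are attached to it as they arrive. A sketch of the intended lifecycle (error handling elided; port_a and port_b are hypothetical):

    struct mlxsw_sp_acl_block *block;

    block = mlxsw_sp_acl_block_create(mlxsw_sp, net);
    mlxsw_sp_acl_block_bind(mlxsw_sp, block, port_a, true);  /* port A, ingress */
    mlxsw_sp_acl_block_bind(mlxsw_sp, block, port_b, true);  /* shared with port B */
    /* ... rules added through the block now apply to both ports ... */
    mlxsw_sp_acl_block_unbind(mlxsw_sp, block, port_b, true);
    mlxsw_sp_acl_block_unbind(mlxsw_sp, block, port_a, true);
    mlxsw_sp_acl_block_destroy(block);
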
+
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ops_ruleset_add;
- return ruleset;
-
-err_ops_ruleset_add:
- rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
- kfree(ruleset);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
- ops->ruleset_del(mlxsw_sp, ruleset->priv);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset,
- struct net_device *dev, bool ingress,
- u32 chain_index)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- int err;
-
- ruleset->ht_key.dev = dev;
- ruleset->ht_key.ingress = ingress;
- ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
- return err;
- if (!ruleset->ht_key.chain_index) {
+ goto err_ht_insert;
+
+ if (!chain_index) {
/* We only need the ruleset with chain index 0, the implicit
* one, to be directly bound to the device. The rest of the
* rulesets are reached via the "goto" action set.
*/
- err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
- goto err_ops_ruleset_bind;
+ goto err_ruleset_bind;
}
- return 0;
-err_ops_ruleset_bind:
+ return ruleset;
+
+err_ruleset_bind:
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
- return err;
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!ruleset->ht_key.chain_index)
- ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ if (!chain_index)
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
{
if (--ruleset->ref_count)
return;
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
- bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
- ht_key.dev = dev;
- ht_key.ingress = ingress;
+ ht_key.block = block;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- int err;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
- if (IS_ERR(ruleset))
- return ruleset;
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
- ingress, chain_index);
- if (err)
- goto err_ruleset_bind;
- return ruleset;
-
-err_ruleset_bind:
- mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
- return ERR_PTR(err);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -535,6 +695,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ ruleset->ht_key.block->rule_count++;
return 0;
err_rhashtable_insert:
@@ -548,6 +709,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
struct list_head region_list;
unsigned int region_count;
struct rhashtable chunk_ht;
- struct {
- u16 local_port;
- bool ingress;
- } bound;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- struct mlxsw_sp_port *mlxsw_sp_port;
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- if (!mlxsw_sp_port_dev_check(dev))
- return -EINVAL;
-
- mlxsw_sp_port = netdev_priv(dev);
- group->bound.local_port = mlxsw_sp_port->local_port;
- group->bound.ingress = ingress;
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
group->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
group->id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
- dev, ingress);
+ mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv)
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_port, ingress);
}
static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
}
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 42e8a36b9b95..cf7b97d40d78 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
- ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
- ingress,
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -104,9 +104,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return err;
out_dev = tcf_mirred_dev(a);
- if (out_dev == dev)
- out_dev = NULL;
-
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev);
if (err)
@@ -265,7 +262,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -383,21 +380,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
- rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -410,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
if (err)
goto err_flower_parse;
@@ -423,7 +418,6 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
goto err_rule_add;
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
- mlxsw_sp_port->acl_rule_count++;
return 0;
err_rule_add:
@@ -435,15 +429,15 @@ err_rule_create:
return err;
}
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -455,13 +449,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
- mlxsw_sp_port->acl_rule_count--;
}
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -469,8 +462,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
u64 bytes;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (part->info->end_index -
+ part->info->start_index + 1) /
+ part->info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += part->info->alloc_size;
+ return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl_part *part;
+ u64 occ = 0;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+ occ += mlxsw_sp_kvdl_part_occ(part);
+
+ return occ;
+}
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
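
The occupancy walk charges alloc_size entries per set bit, so a partially used allocation is counted in full. A self-contained userspace model of the same computation (all values hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t usage = 0x9;           /* slots 0 and 3 in use */
            unsigned int alloc_size = 32;   /* entries per allocation */
            unsigned int nr_entries = 16;   /* (end - start + 1) / alloc_size */
            uint64_t occ = 0;

            for (unsigned int bit = 0; bit < nr_entries; bit++)
                    if (usage & (1u << bit))
                            occ += alloc_size;

            printf("occ = %llu\n", (unsigned long long)occ); /* prints occ = 64 */
            return 0;
    }
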
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,12 +41,157 @@
#include "spectrum.h"
#include "reg.h"
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
+
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+ /* In case this location already contained a qdisc of the
+  * same type, we can simply override the old qdisc's
+  * configuration. Otherwise, we need to remove the old qdisc
+  * before setting the new one.
+  */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
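
mlxsw_sp_qdisc_replace() is fully generic: it owns the destroy-on-type-change step, parameter validation, and the stats reset on handle change, so supporting another qdisc type reduces to supplying an ops table. A hypothetical skeleton (the names below are illustrative, not from the patch):

    static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_example = {
            .type         = MLXSW_SP_QDISC_EXAMPLE, /* hypothetical enum value */
            .check_params = example_check_params,   /* reject unsupported configs */
            .replace      = example_replace,        /* program the hardware */
            .destroy      = example_destroy,        /* restore port defaults */
            .get_stats    = example_get_stats,
            .clean_stats  = example_clean_stats,
    };
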
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
u32 probability, bool is_ecn)
{
- char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+ char cwtp_cmd[MLXSW_REG_CWTP_LEN];
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
@@ -60,10 +205,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
static int
@@ -79,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -161,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+}
- mlxsw_sp_qdisc->handle = handle;
- return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+ u64 backlog;
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
}
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops;
+ u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+ backlog = xstats->backlog[tclass_num];
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ if (p->bands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ int tclass, i;
+ int err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
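
MLXSW_SP_PRIO_BAND_TO_TCLASS (defined at the top of this file) inverts the band order so that band 0, the highest-priority PRIO band, lands on the highest-numbered traffic class, which the hardware serves first. With IEEE_8021QAZ_MAX_TCS == 8:

    /* band 0 -> tclass 7 (served first)
     * band 1 -> tclass 6
     * ...
     * band 7 -> tclass 0 (served last)
     */
    tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(0); /* 8 - 0 - 1 == 7 */
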
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ drops += xstats->tail_drop[i];
+ backlog += xstats->backlog[i];
+ }
+ drops = drops - stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ stats_base->drops += xstats->tail_drop[i];
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_PRIO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 434b3922b34f..31891ae11c9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
int err;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err)
+ goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
return 0;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = old_tree;
+ return err;
}
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
return err;
no_replace:
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err) {
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = NULL;
+ return err;
+ }
return 0;
}
@@ -2628,7 +2636,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
- const struct in6_addr *gw, int ifindex)
+ const struct in6_addr *gw, int ifindex,
+ int weight)
{
int i;
@@ -2636,7 +2645,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
const struct mlxsw_sp_nexthop *nh;
nh = &nh_grp->nexthops[i];
- if (nh->ifindex == ifindex &&
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
}
@@ -2655,11 +2664,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct in6_addr *gw;
- int ifindex;
+ int ifindex, weight;
ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
- if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
return false;
}
@@ -4762,7 +4773,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = 1;
+ nh->nh_weight = rt->rt6i_nh_weight;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
@@ -7008,6 +7019,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
}
#endif
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+ /* HW determines switch priority based on the DSCP bits, but the
+ * kernel still does so based on the ToS byte. Since the two use
+ * different bit positions, translate each DSCP index to the ToS
+ * value it corresponds to, skipping the 2 least-significant ECN bits.
+ */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7020,6 +7049,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
@@ -7095,6 +7125,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_mp_hash_init;
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -7104,6 +7138,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_dscp_init:
err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
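mlxsw_sp_dscp_init() above programs one RDPM entry per DSCP value, using rt_tos2priority(i << 2) because the kernel's priority lookup keys off the full ToS byte. A worked sketch of the shift (hypothetical helper, not kernel API):

/* DSCP occupies bits 7:2 of the ToS byte and ECN bits 1:0, so a 6-bit
 * DSCP index becomes the ToS value the kernel would observe via a left
 * shift of two -- the i << 2 in the loop above.
 */
static inline unsigned char dscp_to_tos(unsigned char dscp)
{
	return (dscp & 0x3f) << 2;	/* ECN bits left as zero */
}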
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 6e5ef984398b..d5866d708dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,7 @@ nfp-objs := \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_ctrl.o \
nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
@@ -44,6 +45,7 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/cmsg.o \
bpf/main.o \
bpf/offload.o \
bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 000000000000..80d3aa0fc9d3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+ u16 used_tags;
+
+ used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+ return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+ /* All FW communication for BPF is request-reply. To make sure we
+ * don't reuse a message ID too early after a timeout, limit the
+ * number of requests in flight.
+ */
+ if (nfp_bpf_all_tags_busy(bpf)) {
+ cmsg_warn(bpf, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+ return bpf->tag_alloc_next++;
+}
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+ while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+ bpf->tag_alloc_last != bpf->tag_alloc_next)
+ bpf->tag_alloc_last++;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ skb_put(skb, size);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += sizeof(struct cmsg_key_value_pair) * n;
+
+ return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&bpf->cmsg_replies, skb) {
+ msg_tag = nfp_bpf_cmsg_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_bpf_free_tag(bpf, tag);
+ __skb_unlink(skb, &bpf->cmsg_replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ if (!skb)
+ nfp_bpf_free_tag(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+ int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_bpf_reply(bpf, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+ skb = nfp_bpf_reply(bpf, tag),
+ msecs_to_jiffies(5000));
+ /* We didn't get a response - try one last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_bpf_reply_drop_tag(bpf, tag);
+ if (err < 0) {
+ cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+ err == -ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+ type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+ enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+ struct cmsg_hdr *hdr;
+ int tag;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ tag = nfp_bpf_alloc_tag(bpf);
+ if (tag < 0) {
+ nfp_ctrl_unlock(bpf->app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = CMSG_MAP_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(bpf->app, skb);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+ skb->len, reply_size);
+ goto err_free;
+ }
+ if (hdr->type != __CMSG_REPLY(type)) {
+ cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ hdr->type, __CMSG_REPLY(type));
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+ struct cmsg_reply_map_simple *reply)
+{
+ static const int res_table[] = {
+ [CMSG_RC_SUCCESS] = 0,
+ [CMSG_RC_ERR_MAP_FD] = -EBADFD,
+ [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
+ [CMSG_RC_ERR_MAP_ERR] = -EINVAL,
+ [CMSG_RC_ERR_MAP_PARSE] = -EIO,
+ [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
+ [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
+ [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
+ };
+ u32 rc;
+
+ rc = be32_to_cpu(reply->rc);
+ if (rc >= ARRAY_SIZE(res_table)) {
+ cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+ return -EIO;
+ }
+
+ return res_table[rc];
+}
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+ struct cmsg_reply_map_alloc_tbl *reply;
+ struct cmsg_req_map_alloc_tbl *req;
+ struct sk_buff *skb;
+ u32 tid;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->key_size = cpu_to_be32(map->key_size);
+ req->value_size = cpu_to_be32(map->value_size);
+ req->max_entries = cpu_to_be32(map->max_entries);
+ req->map_type = cpu_to_be32(map->map_type);
+ req->map_flags = 0;
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+ sizeof(*reply));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ tid = be32_to_cpu(reply->tid);
+ dev_consume_skb_any(skb);
+
+ return tid;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+ struct cmsg_reply_map_free_tbl *reply;
+ struct cmsg_req_map_free_tbl *req;
+ struct sk_buff *skb;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb) {
+ cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+ return;
+ }
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+ sizeof(*reply));
+ if (IS_ERR(skb)) {
+ cmsg_warn(bpf, "leaking map - I/O error\n");
+ return;
+ }
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+ dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+ enum nfp_bpf_cmsg_type op,
+ u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ struct bpf_map *map = &offmap->map;
+ struct cmsg_reply_map_op *reply;
+ struct cmsg_req_map_op *req;
+ struct sk_buff *skb;
+ int err;
+
+ /* FW messages have no space for more than 32 bits of flags */
+ if (flags >> 32)
+ return -EOPNOTSUPP;
+
+ skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+ req->count = cpu_to_be32(1);
+ req->flags = cpu_to_be32(flags);
+
+ /* Copy inputs */
+ if (key)
+ memcpy(&req->elem[0].key, key, map->key_size);
+ if (value)
+ memcpy(&req->elem[0].value, value, map->value_size);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+ sizeof(*reply) + sizeof(*reply->elem));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ /* Copy outputs */
+ if (out_key)
+ memcpy(out_key, &reply->elem[0].key, map->key_size);
+ if (out_value)
+ memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+ dev_consume_skb_any(skb);
+
+ return 0;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ key, NULL, 0, next_key, NULL);
+}
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+
+ tag = nfp_bpf_cmsg_get_tag(skb);
+ if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+ cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&bpf->cmsg_replies, skb);
+ wake_up_interruptible_all(&bpf->cmsg_wq);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return;
+err_unlock:
+ nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
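The tag allocator above treats the 16-bit tag space as a circular window: nfp_bpf_alloc_tag() hands out tags at tag_alloc_next and nfp_bpf_free_tag() reclaims them in order from tag_alloc_last, so unsigned wraparound makes next - last the number of requests in flight. A self-contained sketch of the scheme (demo code, not the driver's API):

#include <stdbool.h>
#include <stdint.h>

#define TAG_SPAN (UINT16_MAX / 4)	/* mirrors NFP_BPF_TAG_ALLOC_SPAN */

struct tag_window {
	uint16_t next;	/* next tag to hand out */
	uint16_t last;	/* oldest tag not yet reclaimed */
};

static bool tags_busy(const struct tag_window *w)
{
	/* u16 subtraction wraps, so the test also holds across 0xffff */
	return (uint16_t)(w->next - w->last) > TAG_SPAN;
}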
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 7206aa1522db..cfcc7bcb2c67 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -38,7 +38,14 @@
#include <linux/types.h>
enum bpf_cap_tlv_type {
+ NFP_BPF_CAP_TYPE_FUNC = 1,
NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
+ NFP_BPF_CAP_TYPE_MAPS = 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+ __le32 func_id;
+ __le32 func_addr;
};
struct nfp_bpf_cap_tlv_adjust_head {
@@ -51,4 +58,100 @@ struct nfp_bpf_cap_tlv_adjust_head {
#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0)
+struct nfp_bpf_cap_tlv_maps {
+ __le32 types;
+ __le32 max_maps;
+ __le32 max_elems;
+ __le32 max_key_sz;
+ __le32 max_val_sz;
+ __le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map-related control messages
+ */
+#define CMSG_MAP_ABI_VERSION 1
+
+enum nfp_bpf_cmsg_type {
+ CMSG_TYPE_MAP_ALLOC = 1,
+ CMSG_TYPE_MAP_FREE = 2,
+ CMSG_TYPE_MAP_LOOKUP = 3,
+ CMSG_TYPE_MAP_UPDATE = 4,
+ CMSG_TYPE_MAP_DELETE = 5,
+ CMSG_TYPE_MAP_GETNEXT = 6,
+ CMSG_TYPE_MAP_GETFIRST = 7,
+ __CMSG_TYPE_MAP_MAX,
+};
+
+#define CMSG_TYPE_MAP_REPLY_BIT 7
+#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+
+#define CMSG_MAP_KEY_LW 16
+#define CMSG_MAP_VALUE_LW 16
+
+enum nfp_bpf_cmsg_status {
+ CMSG_RC_SUCCESS = 0,
+ CMSG_RC_ERR_MAP_FD = 1,
+ CMSG_RC_ERR_MAP_NOENT = 2,
+ CMSG_RC_ERR_MAP_ERR = 3,
+ CMSG_RC_ERR_MAP_PARSE = 4,
+ CMSG_RC_ERR_MAP_EXIST = 5,
+ CMSG_RC_ERR_MAP_NOMEM = 6,
+ CMSG_RC_ERR_MAP_E2BIG = 7,
+};
+
+struct cmsg_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+ struct cmsg_hdr hdr;
+ __be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+ struct cmsg_hdr hdr;
+ __be32 key_size; /* in bytes */
+ __be32 value_size; /* in bytes */
+ __be32 max_entries;
+ __be32 map_type;
+ __be32 map_flags; /* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+};
+
+struct cmsg_key_value_pair {
+ __be32 key[CMSG_MAP_KEY_LW];
+ __be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+ __be32 count;
+ __be32 flags;
+ struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+ __be32 resv;
+ struct cmsg_key_value_pair elem[0];
+};
#endif
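A reply in this ABI echoes the request type with CMSG_TYPE_MAP_REPLY_BIT set, which is what __CMSG_REPLY() computes. A tiny standalone check of the encoding:

#include <assert.h>

int main(void)
{
	unsigned int req = 3;			/* CMSG_TYPE_MAP_LOOKUP */
	unsigned int reply = (1u << 7) | req;	/* __CMSG_REPLY(req) */

	assert(reply == 0x83);
	return 0;
}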
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0de59f04da84..56451edf01c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -85,7 +85,7 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
- return nfp_prog->start_off + nfp_prog->prog_len;
+ return nfp_prog->prog_len;
}
static bool
@@ -100,12 +100,6 @@ nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
-{
- return offset - nfp_prog->start_off;
-}
-
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
@@ -195,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
nfp_prog_push(nfp_prog, insn);
}
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+ enum nfp_relo_type relo)
{
- if (defer > 2) {
+ if (mask == BR_UNC && defer > 2) {
pr_err("BUG: branch defer out of bounds %d\n", defer);
nfp_prog->error = -EFAULT;
return;
}
- __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
}
static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
- __emit_br(nfp_prog, mask,
- mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
- BR_CSS_NONE, addr, defer);
+ emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
static void
@@ -483,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
}
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+ enum nfp_relo_type relo)
+{
+ if (imm > 0xffff) {
+ pr_err("relocation of a large immediate!\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
@@ -515,16 +530,6 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
emit_nop(nfp_prog);
}
-static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
- enum br_special special)
-{
- emit_br(nfp_prog, mask, 0, 0);
-
- nfp_prog->prog[nfp_prog->prog_len - 1] |=
- FIELD_PREP(OP_BR_SPECIAL, special);
-}
-
static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
@@ -548,27 +553,51 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ swreg *rega, swreg *regb)
+{
+ if (offset == reg_imm(0)) {
+ *rega = reg_a(src_gpr);
+ *regb = reg_b(src_gpr + 1);
+ return;
+ }
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+ reg_imm(0));
+ *rega = imm_a(nfp_prog);
+ *regb = imm_b(nfp_prog);
+}
+
/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
bool descending_seq = meta->ldst_gather_len < 0;
s16 len = abs(meta->ldst_gather_len);
swreg src_base, off;
+ bool src_40bit_addr;
unsigned int i;
u8 xfer_num;
off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+ src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
src_base = reg_a(meta->insn.src_reg * 2);
xfer_num = round_up(len, 4) / 4;
+ if (src_40bit_addr)
+ addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ &off);
+
/* Setup PREV_ALU fields to override memory read length. */
if (len > 32)
wrp_immed(nfp_prog, reg_none(),
CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
/* Memory read from source addr into transfer-in registers. */
- emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
- off, xfer_num - 1, true, len > 32);
+ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+ src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+ src_base, off, xfer_num - 1, true, len > 32);
/* Move from transfer-in to transfer-out. */
for (i = 0; i < xfer_num; i++)
@@ -706,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+ swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
- /* We load the value from the address indicated in @offset and then
+ /* We load the value from the address indicated in rreg + lreg and then
* mask out the data we don't need. Note: this is little endian!
*/
sz = max(size, 4);
mask = size < 4 ? GENMASK(size - 1, 0) : 0;
- emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
- reg_a(src_gpr), offset, sz / 4 - 1, true);
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+ lreg, rreg, sz / 4 - 1, true);
i = 0;
if (mask)
@@ -736,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
}
static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+ size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ swreg rega, regb;
+
+ addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+ return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ size, CMD_MODE_40b_BA);
+}
+
+static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -749,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
emit_alu(nfp_prog, reg_none(),
plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -762,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Check packet length */
tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -1269,7 +1318,7 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
/* Skip over the -EINVAL ret code (defer 2) */
- emit_br_def(nfp_prog, end, 2);
+ emit_br(nfp_prog, BR_UNC, end, 2);
emit_alu(nfp_prog, plen_reg(nfp_prog),
plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
@@ -1289,6 +1338,56 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ struct bpf_offloaded_map *offmap;
+ struct nfp_bpf_map *nfp_map;
+ bool load_lm_ptr;
+ u32 ret_tgt;
+ s64 lm_off;
+ swreg tid;
+
+ offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+ nfp_map = offmap->dev_priv;
+
+ /* We only have to reload LM0 if the key is not at start of stack */
+ lm_off = nfp_prog->stack_depth;
+ lm_off += meta->arg2.var_off.value + meta->arg2.off;
+ load_lm_ptr = meta->arg2_var_off || lm_off;
+
+ /* Set LM0 to start of key */
+ if (load_lm_ptr)
+ emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+ /* Load the map ID into a register; it should fit as an immediate,
+ * but in case it doesn't, deal with it here rather than in the
+ * delay slots.
+ */
+ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+ 2, RELO_BR_HELPER);
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
+ /* Load map ID into A0 */
+ wrp_mov(nfp_prog, reg_a(0), tid);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ /* Reset the LM0 pointer */
+ if (!load_lm_ptr)
+ return 0;
+
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+
+ return 0;
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1723,8 +1822,20 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
- meta->insn.dst_reg * 2, size);
+ return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
+
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
@@ -1748,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
+ if (meta->ptr.type == PTR_TO_MAP_VALUE)
+ return mem_ldx_emem(nfp_prog, meta, size);
+
return -EOPNOTSUPP;
}
@@ -1924,6 +2038,26 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -2013,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -2028,6 +2182,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_map_lookup_elem:
+ return map_lookup_stack(nfp_prog, meta);
default:
WARN_ONCE(1, "verifier allowed unsupported function\n");
return -EOPNOTSUPP;
@@ -2036,7 +2192,7 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
return 0;
}
@@ -2097,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
@@ -2104,24 +2264,16 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = goto_out,
};
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
- u16 addr_lo, addr_hi;
-
- addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
- addr_hi = offset != addr_lo;
- *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
- *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
- *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
@@ -2137,11 +2289,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
continue;
if (list_is_last(&meta->l, &nfp_prog->insns))
- idx = nfp_prog->last_bpf_off;
+ br_idx = nfp_prog->last_bpf_off;
else
- idx = list_next_entry(meta, l)->off - 1;
-
- br_idx = nfp_prog_offset_to_index(nfp_prog, idx);
+ br_idx = list_next_entry(meta, l)->off - 1;
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
@@ -2149,7 +2299,8 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
return -ELOOP;
}
/* Leave special branches for later */
- if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
continue;
if (!meta->jmp_dst) {
@@ -2164,38 +2315,13 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
return -ELOOP;
}
- for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
- idx <= br_idx; idx++) {
+ for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
}
}
- /* Fixup 'goto out's separately, they can be scattered around */
- for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
- enum br_special special;
-
- if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
- continue;
-
- special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
- switch (special) {
- case OP_BR_NORMAL:
- break;
- case OP_BR_GO_OUT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_out);
- break;
- case OP_BR_GO_ABORT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_abort);
- break;
- }
-
- nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
- }
-
return 0;
}
@@ -2223,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2250,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
emit_shf(nfp_prog, reg_b(2),
reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2269,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2290,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2706,25 +2832,38 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
return 0;
}
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
+ __le64 *ustore = (__force __le64 *)prog;
int i;
- for (i = 0; i < nfp_prog->prog_len; i++) {
+ for (i = 0; i < len; i++) {
int err;
- err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ err = nfp_ustore_check_valid_no_ecc(prog[i]);
if (err)
return err;
- nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
- ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
}
return 0;
}
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+ void *prog;
+
+ prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+ if (!prog)
+ return;
+
+ nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+ memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+ kvfree(nfp_prog->prog);
+ nfp_prog->prog = prog;
+}
+
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
@@ -2740,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return -EINVAL;
}
- return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+ nfp_bpf_prog_trim(nfp_prog);
+
+ return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta;
+
+ /* Another pass to record jump information. */
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ u64 code = meta->insn.code;
+
+ if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+ BPF_OP(code) != BPF_CALL) {
+ struct nfp_insn_meta *dst_meta;
+ unsigned short dst_indx;
+
+ dst_indx = meta->n + 1 + meta->insn.off;
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+ cnt);
+
+ meta->jmp_dst = dst_meta;
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ }
+ }
+}
+
+bool nfp_bpf_supported_opcode(u8 code)
+{
+ return !!instr_cb[code];
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+ unsigned int i;
+ u64 *prog;
+ int err;
+
+ prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+ GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ enum nfp_relo_type special;
+ u32 val;
+
+ special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+ switch (special) {
+ case RELO_NONE:
+ continue;
+ case RELO_BR_REL:
+ br_add_offset(&prog[i], bv->start_off);
+ break;
+ case RELO_BR_GO_OUT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_out + bv->start_off);
+ break;
+ case RELO_BR_GO_ABORT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_abort + bv->start_off);
+ break;
+ case RELO_BR_NEXT_PKT:
+ br_set_offset(&prog[i], bv->tgt_done);
+ break;
+ case RELO_BR_HELPER:
+ val = br_get_offset(prog[i]);
+ val -= BR_OFF_RELO;
+ switch (val) {
+ case BPF_FUNC_map_lookup_elem:
+ val = nfp_prog->bpf->helpers.map_lookup;
+ break;
+ default:
+ pr_err("relocation of unknown helper %d\n",
+ val);
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ br_set_offset(&prog[i], val);
+ break;
+ case RELO_IMMED_REL:
+ immed_add_value(&prog[i], bv->start_off);
+ break;
+ }
+
+ prog[i] &= ~OP_RELO_TYPE;
+ }
+
+ err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+ if (err)
+ goto err_free_prog;
+
+ return prog;
+
+err_free_prog:
+ kfree(prog);
+ return ERR_PTR(err);
}
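The relocation scheme introduced in this file parks an enum nfp_relo_type value in the top byte of each 64-bit instruction word while the program sits in host memory; nfp_bpf_relo_for_vnic() then resolves the target and masks the byte off before the ustore/ECC pass. A standalone sketch of that scratch-byte trick (hypothetical demo, not driver code):

#include <stdint.h>

#define RELO_MASK  0xff00000000000000ULL	/* cf. OP_RELO_TYPE */
#define RELO_SHIFT 56

static uint64_t stamp_relo(uint64_t insn, uint8_t type)
{
	return (insn & ~RELO_MASK) | ((uint64_t)type << RELO_SHIFT);
}

static uint8_t get_relo(uint64_t insn)
{
	return insn >> RELO_SHIFT;
}

static uint64_t clear_relo(uint64_t insn)
{
	return insn & ~RELO_MASK;	/* must happen before sending to HW */
}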
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 4b63167906ca..4ee11bf2aed7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -87,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
+ struct nfp_bpf_vnic *bv;
int err;
- nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
- if (!nn->app_priv)
+ bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+ if (!bv)
return -ENOMEM;
+ nn->app_priv = bv;
err = nfp_app_nic_vnic_alloc(app, nn, id);
if (err)
goto err_free_priv;
+ bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
return 0;
err_free_priv:
kfree(nn->app_priv);
@@ -191,7 +196,27 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
{
- return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
+ return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int max_mtu;
+
+ if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return 0;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (new_mtu > max_mtu) {
+ nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+ max_mtu);
+ return -EBUSY;
+ }
+ return 0;
}
static int
@@ -226,6 +251,45 @@ nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ switch (readl(&cap->func_id)) {
+ case BPF_FUNC_map_lookup_elem:
+ bpf->helpers.map_lookup = readl(&cap->func_addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->maps.types = readl(&cap->types);
+ bpf->maps.max_maps = readl(&cap->max_maps);
+ bpf->maps.max_elems = readl(&cap->max_elems);
+ bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+ bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+ bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -251,11 +315,19 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
goto err_release_free;
switch (type) {
+ case NFP_BPF_CAP_TYPE_FUNC:
+ if (nfp_bpf_parse_cap_func(app->priv, value, length))
+ goto err_release_free;
+ break;
case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_MAPS:
+ if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -288,6 +360,10 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app;
app->priv = bpf;
+ skb_queue_head_init(&bpf->cmsg_replies);
+ init_waitqueue_head(&bpf->cmsg_wq);
+ INIT_LIST_HEAD(&bpf->map_list);
+
err = nfp_bpf_parse_capabilities(app);
if (err)
goto err_free_bpf;
@@ -301,26 +377,34 @@ err_free_bpf:
static void nfp_bpf_clean(struct nfp_app *app)
{
- kfree(app->priv);
+ struct nfp_app_bpf *bpf = app->priv;
+
+ WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ WARN_ON(!list_empty(&bpf->map_list));
+ WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ kfree(bpf);
}
const struct nfp_app_type app_bpf = {
.id = NFP_APP_BPF_NIC,
.name = "ebpf",
+ .ctrl_cap_mask = 0,
+
.init = nfp_bpf_init,
.clean = nfp_bpf_clean,
+ .change_mtu = nfp_bpf_change_mtu,
+
.extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
+ .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
+ .bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload,
-
- .bpf_verifier_prep = nfp_bpf_verifier_prep,
- .bpf_translate = nfp_bpf_translate,
- .bpf_destroy = nfp_bpf_destroy,
};
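nfp_bpf_parse_capabilities() walks a TLV area and deliberately skips unknown types, so newer firmware can advertise capabilities without breaking older drivers. A minimal sketch of such a forward-compatible TLV walk (hypothetical types and layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint32_t type;
	uint32_t length;	/* number of value bytes that follow */
};

static void parse_caps(const uint8_t *p, const uint8_t *end)
{
	struct tlv_hdr t;

	while ((size_t)(end - p) >= sizeof(t)) {
		memcpy(&t, p, sizeof(t));	/* avoid unaligned reads */
		p += sizeof(t);
		if ((size_t)(end - p) < t.length)
			break;			/* truncated TLV */

		switch (t.type) {
		case 1:		/* e.g. helper function addresses */
		case 3:		/* e.g. map limits */
			/* parse t.length value bytes at p */
			break;
		default:
			printf("unknown capability %u, skipping\n", t.type);
			break;
		}
		p += t.length;
	}
}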
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 89a9b6393882..c476bca15ba4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -37,22 +37,40 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include "../nfp_asm.h"
+#include "fw.h"
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
-#define OP_BR_SPECIAL 0xff00000000000000ULL
-
-enum br_special {
- OP_BR_NORMAL = 0,
- OP_BR_GO_OUT,
- OP_BR_GO_ABORT,
+#define OP_RELO_TYPE 0xff00000000000000ULL
+
+enum nfp_relo_type {
+ RELO_NONE = 0,
+ /* standard internal jumps */
+ RELO_BR_REL,
+ /* internal jumps to parts of the outro */
+ RELO_BR_GO_OUT,
+ RELO_BR_GO_ABORT,
+ /* external jumps to fixed addresses */
+ RELO_BR_NEXT_PKT,
+ RELO_BR_HELPER,
+ /* immediate relocation against load address */
+ RELO_IMMED_REL,
};
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO 15000
+
enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
@@ -82,16 +100,49 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
*
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @cmsg_replies: received cmsg replies waiting to be consumed
+ * @cmsg_wq: wait queue for waiting for cmsg replies
+ *
+ * @map_list: list of offloaded maps
+ * @maps_in_use: number of currently offloaded maps
+ * @map_elems_in_use: number of elements allocated to offloaded maps
+ *
* @adjust_head: adjust head capability
* @flags: extra flags for adjust head
* @off_min: minimal packet offset within buffer required
* @off_max: maximum packet offset within buffer required
* @guaranteed_sub: amount of negative adjustment guaranteed possible
* @guaranteed_add: amount of positive adjustment guaranteed possible
+ *
+ * @maps: map capability
+ * @types: supported map types
+ * @max_maps: max number of maps supported
+ * @max_elems: max number of entries in each map
+ * @max_key_sz: max size of map key
+ * @max_val_sz: max size of map value
+ * @max_elem_sz: max size of map entry (key + value)
+ *
+ * @helpers: helper addresses for various calls
+ * @map_lookup: map lookup helper address
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head cmsg_replies;
+ struct wait_queue_head cmsg_wq;
+
+ struct list_head map_list;
+ unsigned int maps_in_use;
+ unsigned int map_elems_in_use;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -99,6 +150,33 @@ struct nfp_app_bpf {
int guaranteed_sub;
int guaranteed_add;
} adjust_head;
+
+ struct {
+ u32 types;
+ u32 max_maps;
+ u32 max_elems;
+ u32 max_key_sz;
+ u32 max_val_sz;
+ u32 max_elem_sz;
+ } maps;
+
+ struct {
+ u32 map_lookup;
+ } helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap: pointer to the offloaded BPF map
+ * @bpf: back pointer to bpf app private structure
+ * @tid: table id identifying map on datapath
+ * @l: link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+ struct bpf_offloaded_map *offmap;
+ struct nfp_app_bpf *bpf;
+ u32 tid;
+ struct list_head l;
};
struct nfp_prog;
@@ -120,9 +198,12 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* @ptr: pointer type for memory operations
* @ldst_gather_len: memcpy length gathered from load/store sequence
* @paired_st: the paired store insn at the head of the sequence
- * @arg2: arg2 for call instructions
* @ptr_not_const: pointer is not always constant
* @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -140,7 +221,12 @@ struct nfp_insn_meta {
bool ptr_not_const;
};
struct nfp_insn_meta *jmp_dst;
- struct bpf_reg_state arg2;
+ struct {
+ u32 func_id;
+ struct bpf_reg_state arg1;
+ struct bpf_reg_state arg2;
+ bool arg2_var_off;
+ };
};
unsigned int off;
unsigned short n;
@@ -191,11 +277,9 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
* @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
- * @start_off: address of the first instruction in the memory
* @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
@@ -213,11 +297,9 @@ struct nfp_prog {
enum bpf_prog_type type;
- unsigned int start_off;
unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
- unsigned int tgt_done;
unsigned int n_translated;
int error;
@@ -231,12 +313,18 @@ struct nfp_prog {
/**
* struct nfp_bpf_vnic - per-vNIC BPF priv structure
* @tc_prog: currently loaded cls_bpf program
+ * @start_off: address of the first instruction in the memory
+ * @tgt_done: jump target to get the next packet
*/
struct nfp_bpf_vnic {
struct bpf_prog *tc_prog;
+ unsigned int start_off;
+ unsigned int tgt_done;
};
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
@@ -244,16 +332,30 @@ struct netdev_bpf;
struct nfp_app;
struct nfp_net;
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog);
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
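The maps capability limits and the maps_in_use/map_elems_in_use counters documented above combine into a simple admission check when a map is offloaded. An illustrative standalone helper under those assumptions (not driver code):

#include <stdbool.h>

struct map_caps {
	unsigned int max_maps, max_elems;
	unsigned int maps_in_use, elems_in_use;
};

static bool map_fits(const struct map_caps *c, unsigned int entries)
{
	return c->maps_in_use < c->max_maps &&
	       c->max_elems - c->elems_in_use >= entries;
}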
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index fa2905e67b07..1a357aacc444 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -36,12 +36,16 @@
* Netronome network device driver: TC offload functions for PF and VF
*/
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -70,23 +74,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
- /* Another pass to record jump information. */
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- u64 code = meta->insn.code;
-
- if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
- BPF_OP(code) != BPF_CALL) {
- struct nfp_insn_meta *dst_meta;
- unsigned short dst_indx;
-
- dst_indx = meta->n + 1 + meta->insn.off;
- dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
- cnt);
-
- meta->jmp_dst = dst_meta;
- dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
- }
- }
+ nfp_bpf_jit_prepare(nfp_prog, cnt);
return 0;
}
@@ -102,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
@@ -133,12 +122,12 @@ err_free:
return ret;
}
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
+ int err;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -146,37 +135,158 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
-
- nfp_prog->stack_depth = prog->aux->stack_depth;
- nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+ nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog);
+ err = nfp_bpf_jit(nfp_prog);
+ if (err)
+ return err;
+
+ prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+ prog->aux->offload->jited_image = nfp_prog->prog;
+
+ return 0;
}
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- kfree(nfp_prog->prog);
+ kvfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ if (!key)
+ return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+ return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+ return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+ .map_get_next_key = nfp_bpf_map_get_next_key,
+ .map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
+ .map_update_elem = nfp_bpf_ctrl_update_entry,
+ .map_delete_elem = nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map;
+ long long int res;
+
+ if (!bpf->maps.types)
+ return -EOPNOTSUPP;
+
+ if (offmap->map.map_flags ||
+ offmap->map.numa_node != NUMA_NO_NODE) {
+ pr_info("map flags are not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+ pr_info("map type not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->maps.max_maps == bpf->maps_in_use) {
+ pr_info("too many maps for a device\n");
+ return -ENOMEM;
+ }
+ if (bpf->maps.max_elems - bpf->map_elems_in_use <
+ offmap->map.max_entries) {
+ pr_info("map with too many elements: %u, left: %u\n",
+ offmap->map.max_entries,
+ bpf->maps.max_elems - bpf->map_elems_in_use);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz ||
+ offmap->map.value_size > bpf->maps.max_val_sz ||
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+ pr_info("elements don't fit in device constraints\n");
+ return -ENOMEM;
+ }
+
+ nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+ if (!nfp_map)
+ return -ENOMEM;
+
+ offmap->dev_priv = nfp_map;
+ nfp_map->offmap = offmap;
+ nfp_map->bpf = bpf;
+
+ res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+ if (res < 0) {
+ kfree(nfp_map);
+ return res;
+ }
+
+ nfp_map->tid = res;
+ offmap->dev_ops = &nfp_bpf_map_ops;
+ bpf->maps_in_use++;
+ bpf->map_elems_in_use += offmap->map.max_entries;
+ list_add_tail(&nfp_map->l, &bpf->map_list);
+
+ return 0;
+}
+
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+ nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ list_del_init(&nfp_map->l);
+ bpf->map_elems_in_use -= offmap->map.max_entries;
+ bpf->maps_in_use--;
+ kfree(nfp_map);
+
+ return 0;
+}
+
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_bpf_verifier_prep(app, nn, bpf);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_bpf_translate(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_bpf_destroy(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ return nfp_bpf_map_free(app->priv, bpf->offmap);
+ default:
+ return -EINVAL;
+ }
+}
+
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
+ void *img;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
@@ -185,11 +295,17 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
return -EOPNOTSUPP;
}
- dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+ if (IS_ERR(img))
+ return PTR_ERR(img);
+
+ dma_addr = dma_map_single(nn->dp.dev, img,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
- if (dma_mapping_error(nn->dp.dev, dma_addr))
+ if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+ kfree(img);
return -ENOMEM;
+ }
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -201,6 +317,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
+ kfree(img);
return err;
}
@@ -234,7 +351,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
int err;
if (prog) {
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
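
The map allocation path added above sizes each element by rounding both the key and the value up to 8 bytes before comparing against the device's per-element limit, so a map can be rejected even when key and value individually fit. A minimal standalone sketch of that arithmetic (the capability numbers are made-up examples, not values read from firmware):

#include <stdio.h>

#define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    /* Hypothetical device capabilities, not firmware values. */
    unsigned int max_key_sz = 64, max_val_sz = 64, max_elem_sz = 80;
    unsigned int key_size = 4, value_size = 40;
    unsigned int elem_sz = ROUND_UP(key_size, 8) + ROUND_UP(value_size, 8);

    if (key_size > max_key_sz || value_size > max_val_sz ||
        elem_sz > max_elem_sz)
        printf("map rejected: elements don't fit\n");
    else
        printf("map fits: %u bytes per element\n", elem_sz);
    return 0;
}
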
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index d8870c2f11f3..479f602887e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-#define pr_fmt(fmt) "NFP net bpf: " fmt
-
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
@@ -41,6 +39,9 @@
#include "fw.h"
#include "main.h"
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
@@ -109,28 +110,69 @@ static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
struct nfp_insn_meta *meta)
{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
struct nfp_app_bpf *bpf = nfp_prog->bpf;
u32 func_id = meta->insn.imm;
+ s64 off, old_off;
switch (func_id) {
case BPF_FUNC_xdp_adjust_head:
if (!bpf->adjust_head.off_max) {
- pr_warn("adjust_head not supported by FW\n");
+ pr_vlog(env, "adjust_head not supported by FW\n");
return -EOPNOTSUPP;
}
if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
- pr_warn("adjust_head: FW requires shifting metadata, not supported by the driver\n");
+ pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
return -EOPNOTSUPP;
}
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break;
+
+ case BPF_FUNC_map_lookup_elem:
+ if (!bpf->helpers.map_lookup) {
+ pr_vlog(env, "map_lookup: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (reg2->type != PTR_TO_STACK) {
+ pr_vlog(env,
+ "map_lookup: unsupported key ptr type %d\n",
+ reg2->type);
+ return -EOPNOTSUPP;
+ }
+ if (!tnum_is_const(reg2->var_off)) {
+ pr_vlog(env, "map_lookup: variable key pointer\n");
+ return -EOPNOTSUPP;
+ }
+
+ off = reg2->var_off.value + reg2->off;
+ if (-off % 4) {
+ pr_vlog(env,
+ "map_lookup: unaligned stack pointer %lld\n",
+ -off);
+ return -EOPNOTSUPP;
+ }
+
+ /* The rest of the checks apply only if we re-parse the same insn */
+ if (!meta->func_id)
+ break;
+
+ old_off = meta->arg2.var_off.value + meta->arg2.off;
+ meta->arg2_var_off |= off != old_off;
+
+ if (meta->arg1.map_ptr != reg1->map_ptr) {
+ pr_vlog(env, "map_lookup: called for different map\n");
+ return -EOPNOTSUPP;
+ }
+ break;
default:
- pr_warn("unsupported function id: %d\n", func_id);
+ pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP;
}
+ meta->func_id = func_id;
+ meta->arg1 = *reg1;
meta->arg2 = *reg2;
return 0;
@@ -150,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
- pr_info("unsupported exit state: %d, var_off: %s\n",
+ pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
reg0->type, tn_buf);
return -EINVAL;
}
@@ -160,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
+ pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
reg0->type, imm);
return -EINVAL;
}
@@ -171,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
- const struct bpf_reg_state *reg)
+ const struct bpf_reg_state *reg,
+ struct bpf_verifier_env *env)
{
s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) {
- pr_info("variable ptr stack access\n");
+ pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
}
@@ -194,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
if (old_off % 4 == new_off % 4)
return 0;
- pr_info("stack access changed location was:%d is:%d\n",
+ pr_vlog(env, "stack access changed location was:%d is:%d\n",
old_off, new_off);
return -EINVAL;
}
@@ -208,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_MAP_VALUE &&
reg->type != PTR_TO_PACKET) {
- pr_info("unsupported ptr type: %d\n", reg->type);
+ pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL;
}
if (reg->type == PTR_TO_STACK) {
- err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
if (err)
return err;
}
+ if (reg->type == PTR_TO_MAP_VALUE) {
+ if (is_mbpf_store(meta)) {
+ pr_vlog(env, "map writes not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
- pr_info("ptr type changed for instruction %d -> %d\n",
+ pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
return -EINVAL;
}
@@ -239,9 +290,15 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
+ if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+ pr_vlog(env, "instruction %#02x not supported\n",
+ meta->insn.code);
+ return -EINVAL;
+ }
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
- pr_err("program uses extended registers - jit hardening?\n");
+ pr_vlog(env, "program uses extended registers - jit hardening?\n");
return -EINVAL;
}
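
The new map_lookup checks treat the key as living on the BPF stack: stack offsets are negative, so the driver tests that -off, the distance below the stack base, is a multiple of 4. A standalone sketch of the same test (the offsets are examples):

#include <stdio.h>

int main(void)
{
    /* Example stack offsets of the key pointer, expressed as
     * negative distances from the stack base, like the offsets
     * in the verifier's register state.
     */
    long long offs[] = { -8, -12, -10 };
    int i;

    for (i = 0; i < 3; i++) {
        long long off = offs[i];

        printf("off=%lld -> %s\n", off,
               (-off % 4) ? "rejected (unaligned)" : "ok");
    }
    return 0;
}
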
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 67c406815365..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -99,7 +99,7 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
if (port >= reprs->num_reprs)
return NULL;
- return reprs->reprs[port];
+ return rcu_dereference(reprs->reprs[port]);
}
static int
@@ -114,15 +114,19 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
if (!reprs)
return 0;
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i]) {
- struct nfp_repr *repr = netdev_priv(reprs->reprs[i]);
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev) {
+ struct nfp_repr *repr = netdev_priv(netdev);
err = nfp_flower_cmsg_portreify(repr, exists);
if (err)
return err;
count++;
}
+ }
return count;
}
@@ -234,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
return -ENOMEM;
for (i = 0; i < cnt; i++) {
+ struct net_device *repr;
struct nfp_port *port;
u32 port_id;
- reprs->reprs[i] = nfp_repr_alloc(app);
- if (!reprs->reprs[i]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
- port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ port = nfp_port_alloc(app, port_type, repr);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -257,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
- eth_hw_addr_random(reprs->reprs[i]);
+ eth_hw_addr_random(repr);
port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
- err = nfp_repr_init(app, reprs->reprs[i],
+ err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -270,7 +276,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
- reprs->reprs[i]->name);
+ repr->name);
}
nfp_app_reprs_set(app, repr_type, reprs);
@@ -291,7 +297,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
err_reprs_remove:
reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
return err;
}
@@ -329,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
+ struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
- reprs->reprs[phys_port] = nfp_repr_alloc(app);
- if (!reprs->reprs[phys_port]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
- port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
- reprs->reprs[phys_port]);
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
goto err_reprs_clean;
@@ -350,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
- SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
- err = nfp_repr_init(app, reprs->reprs[phys_port],
+ err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -367,7 +374,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
- phys_port, reprs->reprs[phys_port]->name);
+ phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
@@ -397,7 +404,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
err_reprs_remove:
reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
@@ -558,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
+
+ .ctrl_cap_mask = ~0U,
.ctrl_has_meta = true,
.extra_cap = nfp_flower_extra_cap,
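
The representor array entries become RCU pointers in the hunks above: writers publish with RCU_INIT_POINTER()/rcu_assign_pointer() while holding pf->lock, and datapath readers use rcu_dereference(). A userspace sketch of the same publish/subscribe shape, using C11 atomics in place of the kernel's RCU primitives (illustrative only, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>

struct repr { const char *name; };

static _Atomic(struct repr *) slot;    /* one reprs->reprs[] entry */

int main(void)
{
    static struct repr eth0 = { "eth0-repr" };
    struct repr *r;

    /* Writer side (holds the lock in the driver): init, then publish. */
    atomic_store_explicit(&slot, &eth0, memory_order_release);

    /* Lockless reader side: subscribe, then use. */
    r = atomic_load_explicit(&slot, memory_order_acquire);
    if (r)
        printf("reader sees %s\n", r->name);
    return 0;
}
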
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 955a9f44d244..6aedef0ad433 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -32,6 +32,8 @@
*/
#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
}
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
+{
+ return rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+}
+
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs)
{
struct nfp_reprs *old;
- old = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ old = nfp_reprs_get_locked(app, type);
rcu_assign_pointer(app->reprs[type], reprs);
return old;
@@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 3af1943a8521..7e474df90598 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -66,6 +66,9 @@ extern const struct nfp_app_type app_flower;
* struct nfp_app_type - application definition
* @id: application ID
* @name: application name
+ * @ctrl_cap_mask: ctrl vNIC capability mask, allows disabling features like
+ * IRQMOD which are on by default but counter-productive for
+ * control messages which are often latency-sensitive
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
@@ -82,15 +85,15 @@ extern const struct nfp_app_type app_flower;
* @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
+ * @change_mtu: MTU change on a netdev has been requested (veto-only, change
+ * is not guaranteed to be committed)
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
+ * @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
- * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
- * @bpf_translate: translate call for dev-specific BPF programs
- * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -100,6 +103,7 @@ struct nfp_app_type {
enum nfp_app_id id;
const char *name;
+ u32 ctrl_cap_mask;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
@@ -120,6 +124,9 @@ struct nfp_app_type {
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+ int new_mtu);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -128,14 +135,10 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
- int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
- int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -166,6 +169,7 @@ struct nfp_app {
void *priv;
};
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
@@ -242,6 +246,14 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
app->type->repr_clean(app, netdev);
}
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ if (!app || !app->type->change_mtu)
+ return 0;
+ return app->type->change_mtu(app, netdev, new_mtu);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -303,6 +315,14 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
return app->type->setup_tc(app, netdev, type, type_data);
}
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
+{
+ if (!app || !app->type->bpf)
+ return -EINVAL;
+ return app->type->bpf(app, nn, bpf);
+}
+
static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
@@ -311,31 +331,12 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
- if (!app || !app->type->bpf_verifier_prep)
- return -EOPNOTSUPP;
- return app->type->bpf_verifier_prep(app, nn, bpf);
-}
-
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
-{
- if (!app || !app->type->bpf_translate)
- return -EOPNOTSUPP;
- return app->type->bpf_translate(app, nn, prog);
-}
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
-{
- if (!app || !app->type->bpf_destroy)
- return -EOPNOTSUPP;
- return app->type->bpf_destroy(app, nn, prog);
+ return __nfp_ctrl_tx(app->ctrl, skb);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
@@ -388,6 +389,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);
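
The new change_mtu callback documented above is veto-only: an absent callback means no objection and the core commits the change, while a non-zero return blocks it. A standalone sketch of that pattern (the app struct and the 9000-byte limit below are invented for illustration):

#include <stdio.h>

struct app {
    int (*change_mtu)(int new_mtu);    /* hypothetical callback */
};

static int flower_change_mtu(int new_mtu)
{
    return new_mtu > 9000 ? -22 /* -EINVAL */ : 0;
}

static int app_change_mtu(struct app *app, int new_mtu)
{
    if (!app || !app->change_mtu)
        return 0;    /* no callback registered: no objection */
    return app->change_mtu(new_mtu);
}

int main(void)
{
    struct app flower = { flower_change_mtu };

    printf("1500 -> %d\n", app_change_mtu(&flower, 1500));    /* 0 */
    printf("9600 -> %d\n", app_change_mtu(&flower, 9600));    /* -22 */
    return 0;
}
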
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index d3610987fb07..3f6952b66a49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -50,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};
+static bool unreg_is_imm(u16 reg)
+{
+ return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
+u16 br_get_offset(u64 instr)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+ addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+ return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+ addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+ u16 addr;
+
+ addr = br_get_offset(*instr);
+ br_set_offset(instr, addr + offset);
+}
+
+static bool immed_can_modify(u64 instr)
+{
+ if (FIELD_GET(OP_IMMED_INV, instr) ||
+ FIELD_GET(OP_IMMED_SHIFT, instr) ||
+ FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+ pr_err("Can't decode/encode immed!\n");
+ return false;
+ }
+ return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+ u16 reg;
+
+ if (!immed_can_modify(instr))
+ return 0;
+
+ reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+ if (!unreg_is_imm(reg))
+ reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
+ return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+ if (!immed_can_modify(*instr))
+ return;
+
+ if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+ *instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+ } else {
+ *instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+ }
+
+ *instr &= ~OP_IMMED_IMM;
+ *instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+ u16 val;
+
+ if (!immed_can_modify(*instr))
+ return;
+
+ val = immed_get_value(*instr);
+ immed_set_value(instr, val + offset);
+}
+
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{
bool lm_id, lm_dec = false;
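
br_set_offset()/br_get_offset() above split the 16-bit branch target into a low bitfield plus a separate high part, which is why relocation can simply decode, adjust and re-encode an instruction. A userspace sketch of the scheme (the field positions are invented and do not match the real NFP instruction layout):

#include <stdint.h>
#include <stdio.h>

#define ADDR_LO_MASK   0x1fffu    /* invented 13-bit low field */
#define ADDR_HI_SHIFT  13         /* invented high-bit position */

static uint32_t set_offset(uint32_t instr, uint16_t offset)
{
    uint16_t lo = offset & ADDR_LO_MASK;
    uint16_t hi = offset != lo;    /* any bits above the low field */

    instr &= ~(ADDR_LO_MASK | (1u << ADDR_HI_SHIFT));
    return instr | lo | ((uint32_t)hi << ADDR_HI_SHIFT);
}

static uint16_t get_offset(uint32_t instr)
{
    uint16_t lo = instr & ADDR_LO_MASK;
    uint16_t hi = (instr >> ADDR_HI_SHIFT) & 1;

    return hi * (ADDR_LO_MASK + 1) | lo;
}

int main(void)
{
    uint32_t instr = set_offset(0, 0x2345);

    printf("round trip: %#x\n", get_offset(instr));    /* 0x2345 */
    return 0;
}
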
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index a24daeab1a77..5f9291db98e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -81,6 +81,7 @@ enum br_mask {
BR_BHS = 0x04,
BR_BLO = 0x05,
BR_BGE = 0x08,
+ BR_BLT = 0x09,
BR_UNC = 0x18,
};
@@ -93,6 +94,10 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
#define OP_BBYTE_BASE 0x0c800000000ULL
#define OP_BB_A_SRC 0x000000000ffULL
#define OP_BB_BYTE 0x00000000300ULL
@@ -133,6 +138,10 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
#define OP_SHF_BASE 0x08000000000ULL
#define OP_SHF_A_SRC 0x000000000ffULL
#define OP_SHF_SC 0x00000000300ULL
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 6c9f29c2e975..eb0fc614673d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -152,18 +152,8 @@ out:
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- if (!pf->app) {
- ret = -EBUSY;
- goto out;
- }
- ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_get(pf->app, mode);
}
const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 0953fa8f3109..c5b91040b12e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -499,13 +499,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_hwinfo_free;
- err = devlink_register(devlink, &pdev->dev);
- if (err)
- goto err_hwinfo_free;
-
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_devlink_unreg;
+ goto err_hwinfo_free;
pf->mip = nfp_mip_open(pf->cpp);
pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
@@ -549,8 +545,6 @@ err_fw_unload:
kfree(pf->eth_tbl);
kfree(pf->nspi);
vfree(pf->dumpspec);
-err_devlink_unreg:
- devlink_unregister(devlink);
err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
@@ -571,18 +565,13 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- struct devlink *devlink;
nfp_hwmon_unregister(pf);
- devlink = priv_to_devlink(pf);
-
- nfp_net_pci_remove(pf);
-
nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);
- devlink_unregister(devlink);
+ nfp_net_pci_remove(pf);
vfree(pf->dumpspec);
kfree(pf->rtbl);
@@ -598,7 +587,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
kfree(pf->eth_tbl);
kfree(pf->nspi);
mutex_destroy(&pf->lock);
- devlink_free(devlink);
+ devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0e564cfabe7e..d88eda9707e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -578,6 +578,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @tlv_caps: Parsed TLV capabilities
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -644,6 +645,8 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+ struct nfp_net_tlv_caps tlv_caps;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -839,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+ __acquires(&nn->r_vecs[0].lock)
+{
+ spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+ __releases(&nn->r_vecs[0].lock)
+{
+ spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 05e071b3dc5b..cdf52421eaca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*/
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
+ u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+ if (!nfp_net_has_mbox(&nn->tlv_caps)) {
+ nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
+ return -EIO;
+ }
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
@@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return ret;
}
- return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
/* Interrupt configuration and handling
@@ -568,6 +574,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
return err;
}
nn_writeb(nn, ctrl_offset, entry->entry);
+ nfp_net_irq_unmask(nn, entry->entry);
return 0;
}
@@ -582,6 +589,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
unsigned int vector_idx)
{
nn_writeb(nn, ctrl_offset, 0xff);
+ nn_pci_flush(nn);
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
@@ -1918,6 +1926,13 @@ err_free:
return false;
}
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
@@ -2253,7 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -2279,9 +2295,12 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
int sz, err;
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx);
- if (err < 0)
- return err;
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx);
+ if (err < 0)
+ return err;
+ }
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2445,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
@@ -3045,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_dp *dp;
+ int err;
+
+ err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+ if (err)
+ return err;
dp = nfp_net_clone_dp(nn);
if (!dp)
@@ -3066,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
}
@@ -3083,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
@@ -3405,16 +3431,8 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
return 0;
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_app_bpf_translate(nn->app, nn,
- xdp->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_app_bpf_destroy(nn->app, nn,
- xdp->offload.prog);
default:
- return -EINVAL;
+ return nfp_app_bpf(nn->app, nn, xdp);
}
}
@@ -3738,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nfp_net_set_ethtool_ops(netdev);
}
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn: NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static int nfp_net_read_caps(struct nfp_net *nn)
{
- int err;
-
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3782,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* For control vNICs mask out the capabilities app doesn't want. */
+ if (!nn->dp.netdev)
+ nn->cap &= nn->app->type->ctrl_cap_mask;
+
+ return 0;
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ err = nfp_net_read_caps(nn);
+ if (err)
+ return err;
+
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
nn->dp.mtu = nn->max_mtu;
@@ -3805,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ return err;
+
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
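
With the ME frequency now coming from the TLV area, the coalescing conversion above stays the same: microseconds are turned into ME timestamp ticks at 16 ME clock cycles per tick, then packed with the frame count into one register word. A standalone sketch of that arithmetic (the parameter values are examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t me_freq_mhz = 1200;           /* the TLV default above */
    uint32_t factor = me_freq_mhz / 16;    /* ticks per microsecond */
    uint32_t usecs = 50, frames = 64;      /* example ethtool settings */
    uint32_t value = (frames << 16) | (factor * usecs);

    printf("IRQ_MOD word: %#x (usecs -> %u ticks)\n",
           value, factor * usecs);
    return 0;
}
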
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
new file mode 100644
index 000000000000..ffb402746ad4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ caps->me_freq_mhz = 1200;
+ caps->mbox_off = NFP_NET_CFG_MBOX_BASE;
+ caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
+}
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps)
+{
+ u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE;
+ u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ;
+ u32 hdr;
+
+ nfp_net_tlv_caps_reset(caps);
+
+ hdr = readl(data);
+ if (!hdr)
+ return 0;
+
+ while (true) {
+ unsigned int length, offset;
+ u32 hdr = readl(data);
+
+ length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
+ offset = data - ctrl_mem;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
+ dev_err(dev, "TLV size not multiple of %u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, length);
+ return -EINVAL;
+ }
+ if (data + length > end) {
+ dev_err(dev, "oversized TLV offset:%u len:%u\n",
+ offset, length);
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) {
+ case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ dev_err(dev, "NULL TLV at offset:%u\n", offset);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_RESERVED:
+ break;
+ case NFP_NET_CFG_TLV_TYPE_END:
+ if (!length)
+ return 0;
+
+ dev_err(dev, "END TLV should be empty, has len:%d\n",
+ length);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ if (length != 4) {
+ dev_err(dev,
+ "ME FREQ TLV should be 4B, is %dB\n",
+ length);
+ return -EINVAL;
+ }
+
+ caps->me_freq_mhz = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX:
+ if (!length) {
+ caps->mbox_off = 0;
+ caps->mbox_len = 0;
+ } else {
+ caps->mbox_off = data - ctrl_mem;
+ caps->mbox_len = length;
+ }
+ break;
+ default:
+ if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
+ break;
+
+ dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n",
+ FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+ offset, length);
+ return -EINVAL;
+ }
+
+ data += length;
+ if (data + 4 > end) {
+ dev_err(dev, "reached end of BAR without END TLV\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Not reached */
+ return -EINVAL;
+}
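
The parser above walks a list of 4-byte-aligned TLVs until it hits an END record. A userspace sketch of the same walk over an in-memory buffer, using the header layout from nfp_net_ctrl.h (bit 31 required, bits 30:16 type, bits 15:0 length); the types and values in the buffer are examples:

#include <stdint.h>
#include <stdio.h>

#define TLV_TYPE(h)  (((h) >> 16) & 0x7fff)
#define TLV_LEN(h)   ((h) & 0xffff)
#define TLV_END      2    /* NFP_NET_CFG_TLV_TYPE_END */
#define TLV_ME_FREQ  3    /* NFP_NET_CFG_TLV_TYPE_ME_FREQ */

int main(void)
{
    /* Example TLV area: a 4-byte ME_FREQ record, then an empty END. */
    uint32_t bar[] = {
        (TLV_ME_FREQ << 16) | 4, 800,
        (TLV_END << 16) | 0,
    };
    uint32_t *p = bar;

    for (;;) {
        uint32_t hdr = *p++;    /* advance past the header */
        uint32_t type = TLV_TYPE(hdr), len = TLV_LEN(hdr);

        if (type == TLV_END)
            break;
        if (type == TLV_ME_FREQ)
            printf("me_freq_mhz = %u\n", (unsigned)*p);
        p += len / 4;    /* lengths are 4-byte multiples */
    }
    return 0;
}
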
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..eeecef2caac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -43,9 +43,7 @@
#ifndef _NFP_NET_CTRL_H_
#define _NFP_NET_CTRL_H_
-/* IMPORTANT: This header file is shared with the FW,
- * no OS specific constructs, please!
- */
+#include <linux/types.h>
/**
* Configuration BAR size.
@@ -91,23 +89,24 @@
#define NFP_NET_RSS_IPV6_EX_UDP 9
/**
- * @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_RXR_MAX: Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX: Maximum number of TX rings
+ * %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
/**
* Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL: Global control
- * @NFP_NET_CFG_UPDATE: Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU: Set MTU size
- * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR: MAC address
+ * %NFP_NET_CFG_CTRL: Global control
+ * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU: Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR: MAC address
*
* TODO:
* - define Error details in UPDATE
@@ -176,14 +175,14 @@
/**
* Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION: Firmware version number
- * @NFP_NET_CFG_STS: Status
- * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU: Maximum support MTU
- * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION: Firmware version number
+ * %NFP_NET_CFG_STS: Status
+ * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU: Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
* TODO:
* - define more STS bits
@@ -228,31 +227,37 @@
/**
* RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
- * @NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
/**
+ * TLV area start
+ * %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
+ */
+#define NFP_NET_CFG_TLV_BASE 0x0058
+
+/**
* VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
* BPF section
- * @NFP_NET_CFG_BPF_ABI: BPF ABI version
- * @NFP_NET_CFG_BPF_CAP: BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
#define NFP_NET_BPF_ABI 2
@@ -278,9 +283,9 @@
/**
* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG: RSS configuration word
- * @NFP_NET_CFG_RSS_KEY: RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL: RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG: RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/
#define NFP_NET_CFG_RSS_BASE 0x0100
#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
@@ -305,13 +310,13 @@
/**
* TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/
#define NFP_NET_CFG_TXR_BASE 0x0200
#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +330,12 @@
/**
* RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/
#define NFP_NET_CFG_RXR_BASE 0x0800
#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +348,7 @@
/**
* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
* by MSI-X entry and are 1B in size. If an entry is zero, the
* corresponding entry is enabled. If the FW generates an interrupt,
* it writes a cause into the corresponding field. This also masks
@@ -393,8 +398,8 @@
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -408,24 +413,105 @@
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
-#define NFP_NET_CFG_MBOX_CMD 0x1800
-#define NFP_NET_CFG_MBOX_RET 0x1804
-#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
+#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
+#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12
+
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
/**
* VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * %NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
*/
-#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/**
+ * TLV capabilities
+ * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
+ * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
+ * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
+ *
+ * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
+ * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs
+ * is indicated by %NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may
+ * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs
+ * don't conflict with other features which allocate space beyond
+ * %NFP_NET_CFG_TLV_BASE. %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap
+ * space used by such features.
+ * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
+ */
+#define NFP_NET_CFG_TLV_TYPE 0x00
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_LENGTH 0x02
+#define NFP_NET_CFG_TLV_LENGTH_INC 4
+#define NFP_NET_CFG_TLV_VALUE 0x04
+
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
+#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
+
+/**
+ * Capability TLV types
+ *
+ * %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ * Special TLV type to catch bugs, should never be encountered. Drivers should
+ * treat encountering this type as an error and refuse to probe.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_RESERVED:
+ * Reserved space, may contain legacy fixed-offset fields, or be used for
+ * padding. The use of this type should be otherwise avoided.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_END:
+ * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing
+ * further TLVs when encountered.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ * Single word, ME frequency in MHz as used in calculation for
+ * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX:
+ * Variable, mailbox area. Overwrites the default location which is
+ * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ */
+#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
+#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
+#define NFP_NET_CFG_TLV_TYPE_END 2
+#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3
+#define NFP_NET_CFG_TLV_TYPE_MBOX 4
+
+struct device;
+
+/**
+ * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+ * @me_freq_mhz: ME clock_freq (MHz)
+ * @mbox_off: vNIC mailbox area offset
+ * @mbox_len: vNIC mailbox area length
+ */
+struct nfp_net_tlv_caps {
+ u32 me_freq_mhz;
+ unsigned int mbox_off;
+ unsigned int mbox_len;
+};
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps);
+
+static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
+{
+ return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
+}
+
#endif /* _NFP_NET_CTRL_H_ */
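
For reference, one TLV header word decomposes under the masks above as follows (the value is a made-up example, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t hdr = 0x80040008;    /* example: required MBOX TLV, 8B */

    printf("required=%u type=%u length=%u\n",
           hdr >> 31,               /* NFP_NET_CFG_TLV_HEADER_REQUIRED */
           (hdr >> 16) & 0x7fff,    /* NFP_NET_CFG_TLV_HEADER_TYPE */
           hdr & 0xffff);           /* NFP_NET_CFG_TLV_HEADER_LENGTH */
    return 0;
}
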
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 173646e17e94..e6f19f44b461 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -518,16 +518,15 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
while (cpp_rd_addr < max_rd_addr) {
- if (is_xpb_read(&spec_csr->cpp.cpp_id))
- bytes_read = nfp_xpb_readl(pf->cpp, cpp_rd_addr,
- (u32 *)dest);
- else
+ if (is_xpb_read(&spec_csr->cpp.cpp_id)) {
+ err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest);
+ } else {
bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
dest, reg_sz);
- if (bytes_read != reg_sz) {
- if (bytes_read >= 0)
- bytes_read = -EIO;
- dump_header->error = cpu_to_be32(bytes_read);
+ err = bytes_read == reg_sz ? 0 : -EIO;
+ }
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
break;
}
@@ -555,8 +554,8 @@ nfp_read_indirect_csr(struct nfp_cpp *cpp,
NFP_IND_ME_REFL_WR_SIG_INIT,
cpp_params.token, cpp_params.island);
result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
- if (result != sizeof(context))
- return result < 0 ? result : -EIO;
+ if (result)
+ return result;
cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 00b8c642e672..e1dae0616f52 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -331,7 +331,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
ls >= ARRAY_SIZE(ls_to_ethtool))
return 0;
- cmd->base.speed = ls_to_ethtool[sts];
+ cmd->base.speed = ls_to_ethtool[ls];
cmd->base.duplex = DUPLEX_FULL;
return 0;
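
The one-line ethtool fix above matters because sts packs the link-rate code together with other status bits, so indexing the speed table with the raw word selects the wrong entry (or reads out of bounds). A standalone illustration (the field position and table contents are example values, not the real register layout):

#include <stdio.h>

#define STS_LINK_RATE(sts)  (((sts) >> 1) & 0xf)    /* invented position */

static const unsigned int ls_to_ethtool[] = {
    0, 0, 1000, 10000, 25000, 40000, 50000, 100000,
};

int main(void)
{
    unsigned int sts = 0x7;    /* link up + rate code 3, for example */
    unsigned int ls = STS_LINK_RATE(sts);

    /* Indexing with the raw sts word (7) would pick the wrong entry;
     * extracting ls (3) selects the correct speed.
     */
    printf("speed = %u Mbps\n", ls_to_ethtool[ls]);    /* 10000 */
    return 0;
}
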
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index c505014121c4..15fa47f622aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
return err;
@@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
+ mutex_lock(&pf->lock);
err = nfp_app_init(pf->app);
+ mutex_unlock(&pf->lock);
if (err)
goto err_free;
@@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
+
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
+
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -570,17 +572,6 @@ err_unmap_ctrl:
return err;
}
-static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
-{
- nfp_net_pf_app_stop(pf);
- /* stop app first, to avoid double free of ctrl vNIC's ddir */
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_pf_free_irqs(pf);
- nfp_net_pf_app_clean(pf);
- nfp_net_pci_unmap_mem(pf);
-}
-
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->vnics))
- nfp_net_pci_remove_finish(pf);
-
return 0;
}
@@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
int stride;
@@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
- mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
- if ((int)pf->max_data_vnics < 0) {
- err = pf->max_data_vnics;
- goto err_unlock;
- }
+ if ((int)pf->max_data_vnics < 0)
+ return pf->max_data_vnics;
err = nfp_net_pci_map_mem(pf);
if (err)
- goto err_unlock;
+ return err;
ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
+ err = devlink_register(devlink, &pf->pdev->dev);
+ if (err)
+ goto err_app_clean;
+
+ mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -799,32 +790,39 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
+ devlink_unregister(devlink);
+err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
-err_unlock:
- mutex_unlock(&pf->lock);
- cancel_work_sync(&pf->port_refresh_work);
return err;
}
void nfp_net_pci_remove(struct nfp_pf *pf)
{
- struct nfp_net *nn;
+ struct nfp_net *nn, *next;
mutex_lock(&pf->lock);
- if (list_empty(&pf->vnics))
- goto out;
-
- list_for_each_entry(nn, &pf->vnics, vnic_list)
- if (nfp_net_is_data_vnic(nn))
- nfp_net_pf_clean_vnic(pf, nn);
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
+ }
- nfp_net_pf_free_vnics(pf);
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
+ nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_pci_remove_finish(pf);
-out:
mutex_unlock(&pf->lock);
+ devlink_unregister(priv_to_devlink(pf));
+
+ nfp_net_pf_free_irqs(pf);
+ nfp_net_pf_app_clean(pf);
+ nfp_net_pci_unmap_mem(pf);
+
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index f50aa119570a..f67da6bde9da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -46,6 +46,13 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
+{
+ return rcu_dereference_protected(set->reprs[id],
+ lockdep_is_held(&app->pf->lock));
+}
+
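nfp_repr_get_locked() is the standard lockdep-checked accessor for an __rcu pointer whose updates are serialized by a mutex. The shape of the idiom, with hypothetical table/item types:

struct item;

struct table {
	struct mutex lock;		/* serializes updates to slot */
	struct item __rcu *slot;
};

static struct item *table_get_locked(struct table *t)
{
	/* Legal only while t->lock is held; lockdep verifies the
	 * claim when CONFIG_PROVE_RCU is enabled.
	 */
	return rcu_dereference_protected(t->slot,
					 lockdep_is_held(&t->lock));
}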
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
int tx_status)
@@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
+ .ndo_change_mtu = nfp_repr_change_mtu,
.ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
@@ -361,21 +376,24 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
nfp_repr_free(repr);
}
-void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
+ struct net_device *netdev;
unsigned int i;
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i])
- nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_repr_clean_and_free(netdev_priv(netdev));
+ }
kfree(reprs);
}
void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type)
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
+ struct net_device *netdev;
struct nfp_reprs *reprs;
int i;
@@ -387,14 +405,16 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
/* Preclean must happen before we remove the reprs reference from the
* app below.
*/
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i])
- nfp_app_repr_preclean(app, reprs->reprs[i]);
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_app_repr_preclean(app, netdev);
+ }
reprs = nfp_app_reprs_set(app, type, NULL);
synchronize_rcu();
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
}
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
@@ -412,48 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
- struct nfp_reprs *reprs, *old_reprs;
+ struct net_device *netdev;
+ struct nfp_reprs *reprs;
struct nfp_repr *repr;
int i;
- old_reprs =
- rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
- lockdep_is_held(&app->pf->lock));
- if (!old_reprs)
- return 0;
-
- reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
if (!reprs)
- return -ENOMEM;
-
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
- continue;
-
- repr = netdev_priv(old_reprs->reprs[i]);
- if (repr->port->type == NFP_PORT_INVALID) {
- nfp_app_repr_preclean(app, old_reprs->reprs[i]);
- continue;
- }
-
- reprs->reprs[i] = old_reprs->reprs[i];
- }
-
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- synchronize_rcu();
+ return 0;
- /* Now we free up removed representors */
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (!netdev)
continue;
- repr = netdev_priv(old_reprs->reprs[i]);
+ repr = netdev_priv(netdev);
if (repr->port->type != NFP_PORT_INVALID)
continue;
+ nfp_app_repr_preclean(app, netdev);
+ rcu_assign_pointer(reprs->reprs[i], NULL);
+ synchronize_rcu();
nfp_repr_clean(repr);
}
- kfree(old_reprs);
return 0;
}
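The resync loop above follows the canonical RCU removal sequence for each stale representor: unpublish the pointer with rcu_assign_pointer(..., NULL), wait out readers with synchronize_rcu(), and only then tear the object down. Reusing the hypothetical table/item types from the sketch above:

static void table_remove(struct table *t)
{
	struct item *it;

	mutex_lock(&t->lock);
	it = rcu_dereference_protected(t->slot,
				       lockdep_is_held(&t->lock));
	rcu_assign_pointer(t->slot, NULL);	/* unpublish */
	mutex_unlock(&t->lock);

	synchronize_rcu();	/* no reader can still see 'it' */
	kfree(it);		/* now safe to free */
}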
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..a621e8ff528e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -35,6 +35,7 @@
#define NFP_NET_REPR_H
struct metadata_dst;
+struct nfp_app;
struct nfp_net;
struct nfp_port;
@@ -47,7 +48,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device *reprs[0];
+ struct net_device __rcu *reprs[0];
};
/**
@@ -89,6 +90,7 @@ struct nfp_repr {
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function
+ * @__NFP_REPR_TYPE_MAX: number of representor types
*/
enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT,
@@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev)
return priv->dst->u.port_info.port_id;
}
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
+ unsigned int id);
+
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
-void
-nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
-void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type);
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
int nfp_reprs_resync_phys_ports(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c879626e035b..b802a1d55449 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 28262470dabf..ef30597aa319 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -674,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readl(struct nfp_cpp_area *area,
unsigned long offset, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -694,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writel(struct nfp_cpp_area *area,
unsigned long offset, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -712,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readq(struct nfp_cpp_area *area,
unsigned long offset, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -732,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
unsigned long offset, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -1080,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
* @xpb_addr: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
{
@@ -1095,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
* @xpb_addr: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
{
@@ -1113,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
*
* KERNEL: This operation is safe to call in interrupt or softirq context.
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
u32 mask, u32 value)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index ab86bceb93f2..20bad05e2e92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -64,18 +64,20 @@
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/* NOTE: This code should not use nfp_xpb_* functions,
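With the convention change above, nfp_cpp_readl()/nfp_cpp_writel() and the area variants return 0 or a negative errno instead of a byte count, so callers no longer compare the result against the transfer length. A hypothetical caller under the new convention:

/* Hypothetical caller: the result can now be propagated directly. */
static int example_read_id(struct nfp_cpp *cpp, u32 cpp_id,
			   unsigned long long addr, u32 *id)
{
	return nfp_cpp_readl(cpp, cpp_id, addr, id);
}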
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index ecda474ac7c3..46107aefad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
break;
}
- if (err == sym->size)
- err = 0;
- else if (err >= 0)
- err = -EIO;
exit:
if (error)
*error = err;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 21e15cb2f62e..a3f6d514c0f2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5510,11 +5510,9 @@ static int nv_open(struct net_device *dev)
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
*/
- {
- u32 miistat;
- miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- }
+ readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
np->linkspeed = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 217b62a3f587..3e57bf5d3d03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
+ bool eblock;
if (!p_hwfn)
return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (rc)
goto spq_post_fail;
+ /* Check if entry is in block mode before qed_spq_add_entry,
+ * which might kfree p_ent.
+ */
+ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_spq->lock);
- if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ if (eblock) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
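The fix above is an instance of a general use-after-free guard: snapshot any field you will need later before handing the object to a function that may free it. In outline, with hypothetical names (req, queue_add, wait_for_result):

	bool blocking = req->comp_mode == MODE_BLOCK; /* snapshot first */

	rc = queue_add(q, req);		/* may kfree(req) internally */
	if (rc)
		return rc;

	if (blocking)			/* req must not be touched here */
		rc = wait_for_result(q);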
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7aa1c12750b3..a197e11f3a56 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
- add_reg(ARSTR);
if (cd->tsu) {
+ add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);
@@ -3125,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
struct sh_eth_private *mdp;
struct net_device *ndev;
- int ret, devno;
+ int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3137,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- devno = pdev->id;
- if (devno < 0)
- devno = 0;
-
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
@@ -3222,8 +3218,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- /* ioremap the TSU registers */
if (mdp->cd->tsu) {
+ int port = pdev->id < 0 ? 0 : pdev->id % 2;
struct resource *rtsu;
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -3235,7 +3231,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* We can only request the TSU region for the first port
* of the two sharing this TSU for the probe to succeed...
*/
- if (devno % 2 == 0 &&
+ if (port == 0 &&
!devm_request_mem_region(&pdev->dev, rtsu->start,
resource_size(rtsu),
dev_name(&pdev->dev))) {
@@ -3243,6 +3239,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -EBUSY;
goto out_release;
}
+ /* ioremap the TSU registers */
mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
resource_size(rtsu));
if (!mdp->tsu_addr) {
@@ -3250,16 +3247,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out_release;
}
- mdp->port = devno % 2;
+ mdp->port = port;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- /* Need to init only the first port of the two sharing a TSU */
- if (devno % 2 == 0) {
- if (mdp->cd->chip_reset)
- mdp->cd->chip_reset(ndev);
+ /* Need to init only the first port of the two sharing a TSU */
+ if (port == 0) {
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
- if (mdp->cd->tsu) {
/* TSU init (Init only)*/
sh_eth_tsu_init(mdp);
}
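The removed devno variable is folded into a local port computed only where the TSU actually exists. The mapping it implements (values illustrative):

	/*   pdev->id == -1  ->  port 0   (single, auto-assigned id)
	 *   pdev->id ==  0  ->  port 0   (first of a TSU-sharing pair)
	 *   pdev->id ==  1  ->  port 1   (second of the pair)
	 */
	int port = pdev->id < 0 ? 0 : pdev->id % 2;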
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3780161de5a1..12f0abc30cb1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -953,31 +953,42 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
+ if (efx->link_advertising[0]) {
if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
}
}
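link_advertising is now a full ethtool link-mode bitmap rather than a legacy u32; word 0 of the mask still lines up with the old ADVERTISED_* bits, which is why the code above can keep testing link_advertising[0]. A minimal sketch of declaring and manipulating such a mask:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);

	bitmap_zero(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising);

	if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising))
		; /* the mode is advertised */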
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 16da3e9a6000..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -258,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
}
void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
efx->type->prepare_enable_fc_tx(efx);
- old_adv = efx->link_advertising;
+ old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
+ if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 65ee1a468170..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
- u32 result = 0;
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
- result |= SUPPORTED_Backplane;
+ SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseKX_Full;
+ SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseKX4_Full;
+ SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseKR4_Full;
+ SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
- result |= SUPPORTED_FIBRE;
+ SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseCR4_Full;
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
- result |= SUPPORTED_TP;
+ SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- result |= SUPPORTED_10baseT_Half;
+ SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- result |= SUPPORTED_10baseT_Full;
+ SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- result |= SUPPORTED_100baseT_Half;
+ SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- result |= SUPPORTED_100baseT_Full;
+ SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- result |= SUPPORTED_1000baseT_Half;
+ SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- result |= SUPPORTED_Pause;
+ SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- result |= SUPPORTED_Asym_Pause;
+ SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- result |= SUPPORTED_Autoneg;
+ SET_BIT(Autoneg);
- return result;
+ #undef SET_BIT
}
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
- if (cap & SUPPORTED_10baseT_Half)
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (cap & SUPPORTED_10baseT_Full)
+ if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (cap & SUPPORTED_100baseT_Half)
+ if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (cap & SUPPORTED_100baseT_Full)
+ if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (cap & SUPPORTED_1000baseT_Half)
+ if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (cap & SUPPORTED_Pause)
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (cap & SUPPORTED_Asym_Pause)
+ if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (cap & SUPPORTED_Autoneg)
+ if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+ #undef TEST_BIT
+
return result;
}
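A hypothetical caller of the new helper, translating a pair of MCDI capability bits into an ethtool link-mode mask:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);

	mcdi_to_ethtool_linkset(MC_CMD_MEDIA_SFP_PLUS,
				(1 << MC_CMD_PHY_CAP_10000FDX_LBN) |
				(1 << MC_CMD_PHY_CAP_AN_LBN),
				supported);
	/* 'supported' now has FIBRE, 10000baseT_Full and Autoneg set */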
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
else
phy_data->forced_cap = caps;
@@ -435,8 +454,8 @@ fail:
int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps = (efx->link_advertising ?
- ethtool_to_mcdi_cap(efx->link_advertising) :
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
- u32 supported, advertising, lp_advertising;
- supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- advertising = efx->link_advertising;
cmd->base.speed = efx->link_state.speed;
cmd->base.duplex = efx->link_state.fd;
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- lp_advertising =
- mcdi_to_ethtool_cap(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
}
static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if (cmd->base.autoneg) {
- caps = (ethtool_to_mcdi_cap(advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (cmd->base.duplex) {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
}
} else {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
}
}
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return rc;
if (cmd->base.autoneg) {
- efx_link_set_advertising(
- efx, advertising | ADVERTISED_Autoneg);
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_set_advertising(efx, 0);
+ efx_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 4cedc5c4c6d9..3dd42f3136fe 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -937,7 +937,7 @@ struct efx_nic {
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- u32 link_advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
struct efx_link_state link_state;
unsigned int n_link_state_changes;
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
index 3a1829e59e24..6bcfe27fc560 100644
--- a/drivers/net/ethernet/socionext/Kconfig
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -19,4 +19,16 @@ config SNI_AVE
Driver for gigabit ethernet MACs, called AVE, in the
Socionext UniPhier family.
+config SNI_NETSEC
+ tristate "Socionext NETSEC ethernet support"
+ depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+ select PHYLIB
+ select MII
+ ---help---
+ Enable this to add support for the SocioNext NetSec Gigabit Ethernet
+ controller and PHY, as found on the Synquacer SC2A11 SoC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netsec. If unsure, say N.
+
endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
index ab83df63b670..7fd837a999fd 100644
--- a/drivers/net/ethernet/socionext/Makefile
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -3,3 +3,4 @@
# Makefile for all ethernet ip drivers on Socionext platforms
#
obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..f4c0b02ddad8
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST 0x104
+#define NETSEC_REG_COM_INIT 0x120
+
+#define NETSEC_REG_TOP_STATUS 0x200
+#define NETSEC_IRQ_RX BIT(1)
+#define NETSEC_IRQ_TX BIT(0)
+
+#define NETSEC_REG_TOP_INTEN 0x204
+#define NETSEC_REG_INTEN_SET 0x234
+#define NETSEC_REG_INTEN_CLR 0x238
+
+#define NETSEC_REG_NRM_TX_STATUS 0x400
+#define NETSEC_REG_NRM_TX_INTEN 0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
+#define NRM_TX_ST_NTOWNR BIT(17)
+#define NRM_TX_ST_TR_ERR BIT(16)
+#define NRM_TX_ST_TXDONE BIT(15)
+#define NRM_TX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS 0x440
+#define NETSEC_REG_NRM_RX_INTEN 0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
+#define NRM_RX_ST_RC_ERR BIT(16)
+#define NRM_RX_ST_PKTCNT BIT(15)
+#define NRM_RX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF 0xd0
+
+#define NETSEC_REG_CLK_EN 0x100
+
+#define NETSEC_REG_PKT_CTRL 0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL 0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
+#define NETSEC_REG_F_TAIKI_VER 0x230
+#define NETSEC_REG_DMA_HM_CTRL 0x214
+#define NETSEC_REG_DMA_MH_CTRL 0x220
+#define NETSEC_REG_ADDR_DIS_CORE 0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT 0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
+
+#define NETSEC_REG_NRM_TX_TMR 0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT 0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
+
+#define NETSEC_REG_NRM_RX_TMR 0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG 0x430
+#define NETSEC_REG_NRM_RX_CONFIG 0x470
+
+#define MAC_REG_STATUS 0x1024
+#define MAC_REG_DATA 0x11c0
+#define MAC_REG_CMD 0x11c4
+#define MAC_REG_FLOW_TH 0x11cc
+#define MAC_REG_INTF_SEL 0x11d4
+#define MAC_REG_DESC_INIT 0x11fc
+#define MAC_REG_DESC_SOFT_RST 0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
+
+#define GMAC_REG_MCR 0x0000
+#define GMAC_REG_MFFR 0x0004
+#define GMAC_REG_GAR 0x0010
+#define GMAC_REG_GDR 0x0014
+#define GMAC_REG_FCR 0x0018
+#define GMAC_REG_BMR 0x1000
+#define GMAC_REG_RDLAR 0x100c
+#define GMAC_REG_TDLAR 0x1010
+#define GMAC_REG_OMR 0x1018
+
+#define MHZ(n) ((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD 31
+#define NETSEC_TX_SHIFT_LD_FIELD 30
+#define NETSEC_TX_SHIFT_DRID_FIELD 24
+#define NETSEC_TX_SHIFT_PT_FIELD 21
+#define NETSEC_TX_SHIFT_TDRID_FIELD 16
+#define NETSEC_TX_SHIFT_CC_FIELD 15
+#define NETSEC_TX_SHIFT_FS_FIELD 9
+#define NETSEC_TX_LAST 8
+#define NETSEC_TX_SHIFT_CO 7
+#define NETSEC_TX_SHIFT_SO 6
+#define NETSEC_TX_SHIFT_TRS_FIELD 4
+
+#define NETSEC_RX_PKT_OWN_FIELD 31
+#define NETSEC_RX_PKT_LD_FIELD 30
+#define NETSEC_RX_PKT_SDRID_FIELD 24
+#define NETSEC_RX_PKT_FR_FIELD 23
+#define NETSEC_RX_PKT_ER_FIELD 21
+#define NETSEC_RX_PKT_ERR_FIELD 16
+#define NETSEC_RX_PKT_TDRID_FIELD 12
+#define NETSEC_RX_PKT_FS_FIELD 9
+#define NETSEC_RX_PKT_LS_FIELD 8
+#define NETSEC_RX_PKT_CO_FIELD 6
+
+#define NETSEC_RX_PKT_ERR_MASK 3
+
+#define NETSEC_MAX_TX_PKT_LEN 1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
+
+#define NETSEC_RING_GMAC 15
+#define NETSEC_RING_MAX 2
+
+#define NETSEC_TCP_SEG_LEN_MAX 1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL 0
+#define NETSEC_RX_CKSUM_OK 1
+#define NETSEC_RX_CKSUM_NG 2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX 2047
+
+#define NETSEC_FLOW_START_TH_MAX 95
+#define NETSEC_FLOW_STOP_TH_MAX 95
+#define NETSEC_FLOW_PAUSE_TIME_MIN 5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB BIT(2)
+#define NETSEC_COM_INIT_REG_CLS BIT(1)
+#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
+ NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET 0
+#define NETSEC_SOFT_RST_REG_RUN BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP 1
+#define MH_CTRL__MODE_TRANS BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ 0
+#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE BIT(20)
+#define NETSEC_MCR_PS BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
+
+#define NETSEC_FCR_RFE BIT(2)
+#define NETSEC_FCR_TFE BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
+#define GMAC_REG_SHIFT_CR_GAR 2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE 4
+#define NETSEC_REG_DESC_ENDIAN 0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
+#define NETSEC_MAC_DESC_INIT_REG_INIT 1
+
+#define NETSEC_EEPROM_MAC_ADDRESS 0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE 0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
+#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
+
+#define DESC_NUM 128
+#define NAPI_BUDGET (DESC_NUM / 2)
+
+#define DESC_SZ sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+enum ring_id {
+ NETSEC_RING_TX = 0,
+ NETSEC_RING_RX
+};
+
+struct netsec_desc {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *addr;
+ u16 len;
+};
+
+struct netsec_desc_ring {
+ dma_addr_t desc_dma;
+ struct netsec_desc *desc;
+ void *vaddr;
+ u16 pkt_cnt;
+ u16 head, tail;
+};
+
+struct netsec_priv {
+ struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+ struct ethtool_coalesce et_coalesce;
+ spinlock_t reglock; /* protect reg access */
+ struct napi_struct napi;
+ phy_interface_t phy_interface;
+ struct net_device *ndev;
+ struct device_node *phy_np;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ void __iomem *ioaddr;
+ void __iomem *eeprom_base;
+ struct device *dev;
+ struct clk *clk;
+ u32 msg_enable;
+ u32 freq;
+ bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+ u32 attr;
+ u32 data_buf_addr_up;
+ u32 data_buf_addr_lw;
+ u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+ u16 tcp_seg_len;
+ bool tcp_seg_offload_flag;
+ bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+ int rx_cksum_result;
+ int err_code;
+ bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+ writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+ return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC 1000
+#define TIMEOUT_SECONDARY_MS_MAC 100
+
+static u32 netsec_clk_type(u32 freq)
+{
+ if (freq < MHZ(35))
+ return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+ if (freq < MHZ(60))
+ return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+ if (freq < MHZ(100))
+ return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+ if (freq < MHZ(150))
+ return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+ if (freq < MHZ(250))
+ return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+ return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+
+ while (--timeout && netsec_read(priv, addr) & mask)
+ cpu_relax();
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ while (--timeout && netsec_read(priv, addr) & mask)
+ usleep_range(1000, 2000);
+
+ if (timeout)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
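netsec_wait_while_busy() uses a two-stage wait: a bounded busy-spin for the common fast case, then sleeping polls before giving up. The pattern in isolation, with hypothetical names:

static int wait_clear(void __iomem *reg, u32 mask)
{
	int spins = 1000, sleeps = 100;

	while (--spins && (readl(reg) & mask))
		cpu_relax();			/* fast path: spin */
	if (spins)
		return 0;

	while (--sleeps && (readl(reg) & mask))
		usleep_range(1000, 2000);	/* slow path: sleep */

	return sleeps ? 0 : -ETIMEDOUT;
}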
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+ netsec_write(priv, MAC_REG_DATA, value);
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+ return netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+ int ret;
+
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+ ret = netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+ if (ret)
+ return ret;
+
+ *read = netsec_read(priv, MAC_REG_DATA);
+
+ return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+ u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+ int ret, data;
+
+ do {
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ do {
+ usleep_range(1000, 2000);
+
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout && !ret)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+
+ value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+ if (phydev->speed != SPEED_1000)
+ value |= NETSEC_MCR_PS;
+
+ if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+ phydev->speed == SPEED_100)
+ value |= NETSEC_GMAC_MCR_REG_FES;
+
+ value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ value |= NETSEC_GMAC_MCR_REG_IBN;
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+{
+ struct netsec_priv *priv = bus->priv;
+
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_GAR,
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+ struct netsec_priv *priv = bus->priv;
+ u32 data;
+ int ret;
+
+ if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "netsec", sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ *et_coalesce = priv->et_coalesce;
+
+ return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ priv->et_coalesce = *et_coalesce;
+
+ if (priv->et_coalesce.tx_coalesce_usecs < 50)
+ priv->et_coalesce.tx_coalesce_usecs = 50;
+ if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+ priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+ priv->et_coalesce.tx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+ priv->et_coalesce.tx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+ if (priv->et_coalesce.rx_coalesce_usecs < 50)
+ priv->et_coalesce.rx_coalesce_usecs = 50;
+ if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+ priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+ priv->et_coalesce.rx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+ priv->et_coalesce.rx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+ return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+ .get_drvinfo = netsec_et_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = netsec_et_get_coalesce,
+ .set_coalesce = netsec_et_set_coalesce,
+ .get_msglevel = netsec_et_get_msglevel,
+ .set_msglevel = netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+ struct netsec_desc *desc)
+{
+ struct sk_buff *skb;
+
+ if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+ skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+ } else {
+ desc->len = L1_CACHE_ALIGN(desc->len);
+ skb = netdev_alloc_skb(priv->ndev, desc->len);
+ }
+ if (!skb)
+ return NULL;
+
+ desc->addr = skb->data;
+ desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring, u16 idx,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+ u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+ (1 << NETSEC_RX_PKT_FS_FIELD) |
+ (1 << NETSEC_RX_PKT_LS_FIELD);
+
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx].dma_addr = desc->dma_addr;
+ dring->desc[idx].addr = desc->addr;
+ dring->desc[idx].len = desc->len;
+ dring->desc[idx].skb = skb;
+}
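The dma_wmb() at the end of netsec_set_rx_de() (and of netsec_set_tx_de() below) orders the descriptor stores against the MMIO doorbell write that later tells the hardware to look at them. Condensed, with illustrative variables:

	de->data_buf_addr_up = upper_32_bits(dma_addr);
	de->data_buf_addr_lw = lower_32_bits(dma_addr);
	de->buf_len_info     = len;
	de->attr             = attr;	/* carries the OWN bit */
	dma_wmb();	/* descriptor visible before any later doorbell */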
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ u16 idx,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc, u16 *len)
+{
+ struct netsec_de de = {};
+
+ memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+ *len = de.buf_len_info >> 16;
+
+ rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+ rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ *desc = dring->desc[idx];
+ return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc,
+ u16 *len)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct sk_buff *tmp_skb, *skb = NULL;
+ struct netsec_desc td;
+ int tail;
+
+ *rxpi = (struct netsec_rx_pkt_info){};
+
+ td.len = priv->ndev->mtu + 22;
+
+ tmp_skb = netsec_alloc_skb(priv, &td);
+
+ dma_rmb();
+
+ tail = dring->tail;
+
+ if (!tmp_skb) {
+ netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+ dring->desc[tail].skb);
+ } else {
+ skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+ netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+ }
+
+ /* move tail ahead */
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+
+ dring->pkt_cnt--;
+
+ return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ unsigned int pkts, bytes;
+
+ dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ if (dring->pkt_cnt < budget)
+ budget = dring->pkt_cnt;
+
+ pkts = 0;
+ bytes = 0;
+
+ while (pkts < budget) {
+ struct netsec_desc *desc;
+ struct netsec_de *entry;
+ int tail, eop;
+
+ tail = dring->tail;
+
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ desc = &dring->desc[tail];
+ entry = dring->vaddr + DESC_SZ * tail;
+
+ eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ if (eop) {
+ pkts++;
+ bytes += desc->skb->len;
+ dev_kfree_skb(desc->skb);
+ }
+ *desc = (struct netsec_desc){};
+ }
+ dring->pkt_cnt -= budget;
+
+ priv->ndev->stats.tx_packets += budget;
+ priv->ndev->stats.tx_bytes += bytes;
+
+ netdev_completed_queue(priv->ndev, budget, bytes);
+
+ return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ int new, done = 0;
+
+ do {
+ new = netsec_clean_tx_dring(priv, budget);
+ done += new;
+ budget -= new;
+ } while (new);
+
+ if (done && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct net_device *ndev = priv->ndev;
+ struct netsec_rx_pkt_info rx_info;
+ int done = 0, rx_num = 0;
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ u16 len;
+
+ while (done < budget) {
+ if (!rx_num) {
+ rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+ dring->pkt_cnt += rx_num;
+
+ /* move head 'rx_num' */
+ dring->head = (dring->head + rx_num) % DESC_NUM;
+
+ rx_num = dring->pkt_cnt;
+ if (!rx_num)
+ break;
+ }
+ done++;
+ rx_num--;
+ skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+ if (unlikely(!skb) || rx_info.err_flag) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: rx fail err(%d)\n",
+ __func__, rx_info.err_code);
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+
+ if (priv->rx_cksum_offload_flag &&
+ rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct netsec_priv *priv;
+ struct net_device *ndev;
+ int tx, rx, done, todo;
+
+ priv = container_of(napi, struct netsec_priv, napi);
+ ndev = priv->ndev;
+
+ todo = budget;
+ do {
+ if (!todo)
+ break;
+
+ tx = netsec_process_tx(priv, todo);
+ todo -= tx;
+
+ if (!todo)
+ break;
+
+ rx = netsec_process_rx(priv, todo);
+ todo -= rx;
+ } while (rx || tx);
+
+ done = budget - todo;
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_SET,
+ NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ dring->desc[idx].skb = skb;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ u16 tso_seg_len = 0;
+ int filled;
+
+ /* differentiate between full/empty ring */
+ if (dring->head >= dring->tail)
+ filled = dring->head - dring->tail;
+ else
+ filled = dring->head + DESC_NUM - dring->tail;
+
+ if (DESC_NUM - filled < 2) { /* if less than 2 available */
+ netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ctrl.cksum_offload_flag = true;
+
+ if (skb_is_gso(skb))
+ tso_seg_len = skb_shinfo(skb)->gso_size;
+
+ if (tso_seg_len > 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(0, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ tx_ctrl.tcp_seg_offload_flag = true;
+ tx_ctrl.tcp_seg_len = tso_seg_len;
+ }
+
+ tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: DMA mapping failed\n", __func__);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tx_desc.addr = skb->data;
+ tx_desc.len = skb_headlen(skb);
+
+ skb_tx_timestamp(skb);
+ netdev_sent_queue(priv->ndev, skb->len);
+
+ netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit one packet */
+
+ return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ struct netsec_desc *desc;
+ u16 idx;
+
+ if (!dring->vaddr || !dring->desc)
+ return;
+
+ for (idx = 0; idx < DESC_NUM; idx++) {
+ desc = &dring->desc[idx];
+ if (!desc->addr)
+ continue;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ dev_kfree_skb(desc->skb);
+ }
+
+ memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+ memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+ dring->head = 0;
+ dring->tail = 0;
+ dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+ if (dring->vaddr) {
+ dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ dring->vaddr, dring->desc_dma);
+ dring->vaddr = NULL;
+ }
+
+ kfree(dring->desc);
+ dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ int ret = 0;
+
+ dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
+ if (!dring->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+ if (!dring->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ netsec_free_dring(priv, id);
+
+ return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ int n;
+
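+	/* the extra 22 bytes presumably cover the Ethernet header, a
+	 * VLAN tag and the FCS (14 + 4 + 4)
+	 */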
+ desc.len = priv->ndev->mtu + 22;
+
+ for (n = 0; n < DESC_NUM; n++) {
+ skb = netsec_alloc_skb(priv, &desc);
+ if (!skb) {
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return -ENOMEM;
+ }
+ netsec_set_rx_de(priv, dring, n, &desc, skb);
+ }
+
+ return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+ u32 addr_h, u32 addr_l, u32 size)
+{
+ u64 base = (u64)addr_h << 32 | addr_l;
+ void __iomem *ucode;
+ u32 i;
+
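+	/* the microcode image lives in system memory at the address the
+	 * EEPROM advertises; stream it word by word into the engine's
+	 * command buffer register
+	 */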
+ ucode = ioremap(base, size * sizeof(u32));
+ if (!ucode)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++)
+ netsec_write(priv, reg, readl(ucode + i * 4));
+
+ iounmap(ucode);
+ return 0;
+}
+
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+ u32 addr_h, addr_l, size;
+ int err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = 0;
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+ u32 value;
+ int err;
+
+ /* stop DMA engines */
+ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+ netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+
+ while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+
+ while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+ }
+
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+ netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+ while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+ cpu_relax();
+
+ /* set desc_start addr */
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+
+ /* set normal tx dring ring config */
+ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+ netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n", __func__, err);
+ return err;
+ }
+
+ /* start DMA engines */
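+	/* the timer prescaler below is derived from the reference clock,
+	 * apparently yielding a tick of roughly 1us
+	 */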
+ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+ netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+ usleep_range(1000, 2000);
+
+ if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+ netif_err(priv, probe, priv->ndev,
+ "microengine start failed\n");
+ return -ENXIO;
+ }
+ netsec_write(priv, NETSEC_REG_TOP_STATUS,
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+ value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+ if (priv->ndev->mtu > ETH_DATA_LEN)
+ value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+ /* change to normal mode */
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+ netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+ while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+ NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+ cpu_relax();
+
+ /* clear any pending EMPTY/ERR irq status */
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+ /* Disable TX & RX intr */
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+ return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+ int ret;
+
+ if (phydev->speed != SPEED_1000)
+ value = (NETSEC_GMAC_MCR_REG_CST |
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_RESET))
+ return -ETIMEDOUT;
+
+	/* wait for the soft reset to complete */
+ usleep_range(1000, 5000);
+
+ ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+ if (ret)
+ return ret;
+ if (value & NETSEC_GMAC_BMR_REG_SWR)
+ return -EAGAIN;
+
+ netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+ return -ETIMEDOUT;
+
+ netsec_write(priv, MAC_REG_DESC_INIT, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+ return -ETIMEDOUT;
+
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+ NETSEC_GMAC_RDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+ NETSEC_GMAC_TDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_update_to_phy_state(priv);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+
+ value |= NETSEC_GMAC_OMR_REG_SR;
+ value |= NETSEC_GMAC_OMR_REG_ST;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+ if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+ u32 value;
+ int ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+ value &= ~NETSEC_GMAC_OMR_REG_SR;
+ value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+ /* disable all interrupts */
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev->link)
+ netsec_start_gmac(priv);
+ else
+ netsec_stop_gmac(priv);
+
+ phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+ struct netsec_priv *priv = dev_id;
+ u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+ unsigned long flags;
+
+	/* ack the pending Tx/Rx interrupt status */
+ if (status & NETSEC_IRQ_TX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+ }
+ if (status & NETSEC_IRQ_RX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+ }
+
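+	/* mask Tx/Rx interrupts; the NAPI poll re-enables them once the
+	 * rings have been serviced
+	 */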
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: fail setup ring\n", __func__);
+ goto err1;
+ }
+
+ ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+ IRQF_SHARED, "netsec", priv);
+ if (ret) {
+ netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+ goto err2;
+ }
+
+ if (dev_of_node(priv->dev)) {
+ if (!of_phy_connect(priv->ndev, priv->phy_np,
+ netsec_phy_adjust_link, 0,
+ priv->phy_interface)) {
+ netif_err(priv, link, priv->ndev, "missing PHY\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+ } else {
+ ret = phy_connect_direct(priv->ndev, priv->phydev,
+ netsec_phy_adjust_link,
+ priv->phy_interface);
+ if (ret) {
+ netif_err(priv, link, priv->ndev,
+ "phy_connect_direct() failed (%d)\n", ret);
+ goto err3;
+ }
+ }
+
+ phy_start(ndev->phydev);
+
+ netsec_start_gmac(priv);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ /* Enable RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+ return 0;
+err3:
+ free_irq(priv->ndev->irq, priv);
+err2:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+ pm_runtime_put_sync(priv->dev);
+ return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+
+ napi_disable(&priv->napi);
+
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+ netsec_stop_gmac(priv);
+
+ free_irq(priv->ndev->irq, priv);
+
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+
+ pm_runtime_put_sync(priv->dev);
+
+ return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+ if (ret)
+ return ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+ if (ret)
+ goto err1;
+
+ ret = netsec_reset_hardware(priv);
+ if (ret)
+ goto err2;
+
+ return 0;
+err2:
+ netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+ netsec_free_dring(priv, NETSEC_RING_TX);
+ return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netsec_free_dring(priv, NETSEC_RING_RX);
+ netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+ return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+ .ndo_init = netsec_netdev_init,
+ .ndo_uninit = netsec_netdev_uninit,
+ .ndo_open = netsec_netdev_open,
+ .ndo_stop = netsec_netdev_stop,
+ .ndo_start_xmit = netsec_netdev_start_xmit,
+ .ndo_set_features = netsec_netdev_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv)
+{
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk not found\n");
+ return PTR_ERR(priv->clk);
+ }
+ priv->freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "missing required property 'phy-channel'\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev,
+ "socionext,phy-clock-frequency",
+ &priv->freq);
+ if (ret)
+ dev_err(&pdev->dev,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!dev_of_node(priv->dev) && phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
+
+ mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "SNI NETSEC MDIO";
+ bus->read = netsec_phy_read;
+ bus->write = netsec_phy_write;
+ bus->parent = priv->dev;
+ priv->mii_bus = bus;
+
+ if (dev_of_node(priv->dev)) {
+ struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+ mdio_node = of_get_child_by_name(parent, "mdio");
+ if (mdio_node) {
+ parent = mdio_node;
+ } else {
+			/* older firmware doesn't populate the mdio
+			 * subnode; tolerate that so the firmware can be
+			 * upgraded in due time
+			 */
+ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+ }
+
+ ret = of_mdiobus_register(bus, parent);
+ of_node_put(mdio_node);
+
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+ } else {
+ /* Mask out all PHYs from auto probing. */
+ bus->phy_mask = ~0;
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+
+ priv->phydev = get_phy_device(bus, phy_addr, false);
+ if (IS_ERR(priv->phydev)) {
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+ struct resource *mmio_res, *eeprom_res, *irq_res;
+ u8 *mac, macbuf[ETH_ALEN];
+ struct netsec_priv *priv;
+ u32 hw_ver, phy_addr = 0;
+ struct net_device *ndev;
+ int ret;
+
+ mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmio_res) {
+ dev_err(&pdev->dev, "No MMIO resource found.\n");
+ return -ENODEV;
+ }
+
+ eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!eeprom_res) {
+ dev_info(&pdev->dev, "No EEPROM resource found.\n");
+ return -ENODEV;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "No IRQ resource found.\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ spin_lock_init(&priv->reglock);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, priv);
+ ndev->irq = irq_res->start;
+ priv->dev = &pdev->dev;
+ priv->ndev = ndev;
+
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+ dev_err(&pdev->dev, "devm_ioremap() failed\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+ resource_size(eeprom_res));
+ if (!priv->eeprom_base) {
+ dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+ if (mac)
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (priv->eeprom_base &&
+ (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ void __iomem *macp = priv->eeprom_base +
+ NETSEC_EEPROM_MAC_ADDRESS;
+
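+		/* the EEPROM appears to hold the MAC address as two
+		 * 32-bit words with the bytes swapped, hence the
+		 * swizzle below
+		 */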
+ ndev->dev_addr[0] = readb(macp + 3);
+ ndev->dev_addr[1] = readb(macp + 2);
+ ndev->dev_addr[2] = readb(macp + 1);
+ ndev->dev_addr[3] = readb(macp + 0);
+ ndev->dev_addr[4] = readb(macp + 7);
+ ndev->dev_addr[5] = readb(macp + 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ if (dev_of_node(&pdev->dev))
+ ret = netsec_of_probe(pdev, priv);
+ else
+ ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+ if (ret)
+ goto free_ndev;
+
+ if (!priv->freq) {
+ dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+	/* coalescing defaults tuned for throughput */
+ priv->et_coalesce.rx_coalesce_usecs = 500;
+ priv->et_coalesce.rx_max_coalesced_frames = 8;
+ priv->et_coalesce.tx_coalesce_usecs = 500;
+ priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+ ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+ &ndev->max_mtu);
+ if (ret < 0)
+ ndev->max_mtu = ETH_DATA_LEN;
+
+	/* hold a runtime PM reference across probe; open/close take their own */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+ /* this driver only supports F_TAIKI style NETSEC */
+ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+ NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+ ret = -ENODEV;
+ goto pm_disable;
+ }
+
+ dev_info(&pdev->dev, "hardware revision %d.%d\n",
+ hw_ver >> 16, hw_ver & 0xffff);
+
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+ ndev->netdev_ops = &netsec_netdev_ops;
+ ndev->ethtool_ops = &netsec_ethtool_ops;
+
+ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features = ndev->features;
+
+ priv->rx_cksum_offload_flag = true;
+
+ ret = netsec_register_mdio(priv, phy_addr);
+ if (ret)
+ goto unreg_napi;
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netif_err(priv, probe, ndev, "register_netdev() failed\n");
+ goto unreg_mii;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+
+unreg_mii:
+ netsec_unregister_mdio(priv);
+unreg_napi:
+ netif_napi_del(&priv->napi);
+pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_ndev:
+ free_netdev(ndev);
+ dev_err(&pdev->dev, "init failed\n");
+
+ return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+ struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+
+ netsec_unregister_mdio(priv);
+
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ clk_prepare_enable(priv->clk);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+ NETSEC_CLK_EN_REG_DOM_C |
+ NETSEC_CLK_EN_REG_DOM_G);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+ SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+ { .compatible = "socionext,synquacer-netsec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+ { "SCX0001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+ .probe = netsec_probe,
+ .remove = netsec_remove,
+ .driver = {
+ .name = "netsec",
+ .pm = &netsec_pm_ops,
+ .of_match_table = netsec_dt_ids,
+ .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+ },
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <[email protected]>");
+MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b32c5..5270d26f0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
+#define PRG_ETH0_RGMII_TX_CLK_EN 10
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
struct clk_divider m250_div;
struct clk *m250_div_clk;
- struct clk_divider m25_div;
- struct clk *m25_div_clk;
+ struct clk_fixed_factor fixed_div2;
+ struct clk *fixed_div2_clk;
+
+ struct clk_gate rgmii_tx_en;
+ struct clk *rgmii_tx_en_clk;
u32 tx_delay_ns;
};
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
writel(data, dwmac->regs + reg);
}
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{
struct clk_init_data init;
int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
char clk_name[32];
const char *clk_div_parents[1];
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
- static const struct clk_div_table clk_25m_div_table[] = {
- { .val = 0, .div = 5 },
- { .val = 1, .div = 10 },
- { /* sentinel */ },
- };
/* get the mux parents from DT */
for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
init.name = clk_name;
init.ops = &clk_mux_ops;
- init.flags = 0;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = mux_parent_names;
init.num_parents = MUX_CLK_NUM_PARENTS;
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
dwmac->m250_div.hw.init = &init;
- dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
return PTR_ERR(dwmac->m250_div_clk);
- /* create the m25_div */
- snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ /* create the fixed_div2 */
+ snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_divider_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = CLK_SET_RATE_PARENT;
clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
init.parent_names = clk_div_parents;
init.num_parents = ARRAY_SIZE(clk_div_parents);
- dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
- dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
- dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
- dwmac->m25_div.table = clk_25m_div_table;
- dwmac->m25_div.hw.init = &init;
- dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->fixed_div2.mult = 1;
+ dwmac->fixed_div2.div = 2;
+ dwmac->fixed_div2.hw.init = &init;
- dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
- if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
- return PTR_ERR(dwmac->m25_div_clk);
+ dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+ return PTR_ERR(dwmac->fixed_div2_clk);
+
+ /* create the rgmii_tx_en */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+ dev_name(dev));
+ init.ops = &clk_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ dwmac->rgmii_tx_en.hw.init = &init;
+
+ dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+ &dwmac->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+ return PTR_ERR(dwmac->rgmii_tx_en_clk);
return 0;
}
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
- unsigned long clk_rate;
u8 tx_dly_val = 0;
switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* Generate a 25MHz clock for the PHY */
- clk_rate = 25 * 1000 * 1000;
-
/* enable RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+	/* Configure the 125MHz RGMII TX clock. The IP block adjusts
+	 * the output automatically (no register configuration is
+	 * needed) based on the line speed: 125MHz for Gbit speeds,
+	 * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s.
+	 */
+ ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to set RGMII TX clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to enable the RGMII TX clock\n");
+ return ret;
+ }
break;
case PHY_INTERFACE_MODE_RMII:
- /* Use the rate of the mux clock for the internal RMII PHY */
- clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
/* disable RGMII mode -> enables RMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- ret = clk_prepare_enable(dwmac->m25_div_clk);
- if (ret) {
- dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
- return ret;
- }
-
- ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
- if (ret) {
- clk_disable_unprepare(dwmac->m25_div_clk);
-
- dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
- return ret;
- }
-
/* enable TX_CLK and PHY_REF_CLK generator */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- ret = meson8b_init_clk(dwmac);
+ ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
{
struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
return stmmac_pltfr_remove(pdev);
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
/* warning!!!! We are retrieving the virtual ptr in the sw_data
* field as a 32bit value. Will not work on 64bit machines
*/
- page = (struct page *)GET_SW_DATA0(desc);
+ page = (struct page *)GET_SW_DATA0(ndesc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,