Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/Kconfig | 5
-rw-r--r--  drivers/net/can/grcan.c | 3
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 307
-rw-r--r--  drivers/net/can/m_can/m_can.c | 32
-rw-r--r--  drivers/net/can/m_can/m_can.h | 3
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 21
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c | 6
-rw-r--r--  drivers/net/can/usb/ucan.c | 2
-rw-r--r--  drivers/net/can/xilinx_can.c | 25
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 6
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c | 1
-rw-r--r--  drivers/net/dsa/b53/b53_mmap.c | 1
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 1
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.c | 1
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz8863_smi.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 4
-rw-r--r--  drivers/net/dsa/mt7530-mmio.c | 3
-rw-r--r--  drivers/net/dsa/mt7530.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 426
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 33
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c | 190
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6352.c | 390
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-639x.c | 898
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 30
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 1106
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 108
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 6
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 1
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 3
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 2
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 2
-rw-r--r--  drivers/net/dsa/qca/qca8k-leds.c | 1
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 2
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 7
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 1
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 12
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 1437
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 586
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 503
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 1415
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h | 257
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 156
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 13
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 51
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 65
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 101
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 1309
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.h | 120
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 150
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 90
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 186
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 84
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 221
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 35
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 320
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 398
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.h | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c | 36
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 498
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 328
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 18
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 166
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 351
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c | 200
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 601
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 170
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 5
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 54
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 160
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 70
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 5
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 39
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 36
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 123
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 47
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 254
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 6
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 68
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 6
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 64
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 8
-rw-r--r--  drivers/net/gtp.c | 3
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c | 2
-rw-r--r--  drivers/net/mdio/mdio-xgene.c | 4
-rw-r--r--  drivers/net/netconsole.c | 115
-rw-r--r--  drivers/net/netdevsim/Makefile | 4
-rw-r--r--  drivers/net/netdevsim/macsec.c | 356
-rw-r--r--  drivers/net/netdevsim/netdev.c | 3
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 34
-rw-r--r--  drivers/net/pcs/pcs-rzn1-miic.c | 1
-rw-r--r--  drivers/net/phy/Kconfig | 7
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/at803x.c | 135
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 1
-rw-r--r--  drivers/net/phy/marvell-88q2xxx.c | 263
-rw-r--r--  drivers/net/phy/marvell-88x2222.c | 1
-rw-r--r--  drivers/net/phy/mdio_bus.c | 37
-rw-r--r--  drivers/net/phy/mediatek-ge-soc.c | 2
-rw-r--r--  drivers/net/phy/motorcomm.c | 118
-rw-r--r--  drivers/net/phy/phy-c45.c | 63
-rw-r--r--  drivers/net/phy/phylink.c | 178
-rw-r--r--  drivers/net/phy/smsc.c | 252
-rw-r--r--  drivers/net/ppp/pppoe.c | 4
-rw-r--r--  drivers/net/ppp/pptp.c | 8
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 44
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_mhccif.h | 1
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port.h | 6
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_reg.h | 2
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2
215 files changed, 13666 insertions, 3480 deletions
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index a5c5036dfb94..18e4a6e94ba9 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -160,8 +160,13 @@ config CAN_KVASER_PCIEFD
Kvaser PCIEcan 4xHS
Kvaser PCIEcan 2xHS v2
Kvaser PCIEcan HS v2
+ Kvaser PCIEcan 1xCAN v3
+ Kvaser PCIEcan 2xCAN v3
+ Kvaser PCIEcan 4xCAN v2
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+ Kvaser Mini PCI Express 1xCAN v3
+ Kvaser Mini PCI Express 2xCAN v3
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 3174efdae271..6d3ba71a6a73 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -30,8 +30,9 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index db6256f2b1b3..a57005faa04f 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
* Parts of this driver are based on the following:
- * - Kvaser linux pciefd driver (version 5.25)
+ * - Kvaser linux pciefd driver (version 5.42)
* - PEAK linux canfd driver
*/
@@ -33,37 +33,27 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
+/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011
-/* PCIe IRQ registers */
-#define KVASER_PCIEFD_IRQ_REG 0x40
-#define KVASER_PCIEFD_IEN_REG 0x50
-/* DMA address translation map register base */
-#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
-/* Loopback control register */
-#define KVASER_PCIEFD_LOOP_REG 0x1f000
-/* System identification and information registers */
-#define KVASER_PCIEFD_SYSID_BASE 0x1f020
-#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
-#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
-#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
-#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
-/* Shared receive buffer registers */
-#define KVASER_PCIEFD_SRB_BASE 0x1f200
-#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
-#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
-#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
-#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
-#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
-#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
-#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
+/* SmartFusion2 based devices */
+#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
+#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
+#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
+#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
+#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+
+/* Altera SerDes Enable 64-bit DMA address translation */
+#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
+
+/* SmartFusion2 SerDes LSB address translation mask */
+#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
-#define KVASER_PCIEFD_KCAN0_BASE 0x10000
-#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
@@ -77,13 +67,20 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
-
-/* PCI interrupt fields */
-#define KVASER_PCIEFD_IRQ_SRB BIT(4)
-#define KVASER_PCIEFD_IRQ_ALL_MASK GENMASK(4, 0)
-
-/* Enable 64-bit DMA address translation */
-#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
+/* System identification and information registers */
+#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
+#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
+#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
+#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
+/* Shared receive buffer FIFO registers */
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
+/* Shared receive buffer registers */
+#define KVASER_PCIEFD_SRB_CMD_REG 0x0
+#define KVASER_PCIEFD_SRB_IEN_REG 0x04
+#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
+#define KVASER_PCIEFD_SRB_STAT_REG 0x10
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
+#define KVASER_PCIEFD_SRB_CTRL_REG 0x18
/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
@@ -253,7 +250,122 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+/* Macros for calculating addresses of registers */
+#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
+ ((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
+#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
+#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
+#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
+#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
+#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
+#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
+#define KVASER_PCIEFD_SRB_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
+#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
+#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
+#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
+ (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
+#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
+ (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))
+
struct kvaser_pciefd;
+static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+
+struct kvaser_pciefd_address_offset {
+ u32 serdes;
+ u32 pci_ien;
+ u32 pci_irq;
+ u32 sysid;
+ u32 loopback;
+ u32 kcan_srb_fifo;
+ u32 kcan_srb;
+ u32 kcan_ch0;
+ u32 kcan_ch1;
+};
+
+struct kvaser_pciefd_dev_ops {
+ void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+};
+
+struct kvaser_pciefd_irq_mask {
+ u32 kcan_rx0;
+ u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ u32 all;
+};
+
+struct kvaser_pciefd_driver_data {
+ const struct kvaser_pciefd_address_offset *address_offset;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ const struct kvaser_pciefd_dev_ops *ops;
+};
+
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
+ .serdes = 0x1000,
+ .pci_ien = 0x50,
+ .pci_irq = 0x40,
+ .sysid = 0x1f020,
+ .loopback = 0x1f000,
+ .kcan_srb_fifo = 0x1f200,
+ .kcan_srb = 0x1f400,
+ .kcan_ch0 = 0x10000,
+ .kcan_ch1 = 0x11000,
+};
+
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
+ .serdes = 0x280c8,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
+ .all = GENMASK(4, 0),
+};
+
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
+};
+
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
+};
+
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
+ .address_offset = &kvaser_pciefd_altera_address_offset,
+ .irq_mask = &kvaser_pciefd_altera_irq_mask,
+ .ops = &kvaser_pciefd_altera_dev_ops,
+};
+
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
+ .address_offset = &kvaser_pciefd_sf2_address_offset,
+ .irq_mask = &kvaser_pciefd_sf2_irq_mask,
+ .ops = &kvaser_pciefd_sf2_dev_ops,
+};
struct kvaser_pciefd_can {
struct can_priv can;
@@ -273,6 +385,7 @@ struct kvaser_pciefd {
struct pci_dev *pci;
void __iomem *reg_base;
struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ const struct kvaser_pciefd_driver_data *driver_data;
void *dma_data[KVASER_PCIEFD_DMA_COUNT];
u8 nr_channels;
u32 bus_freq;
@@ -305,18 +418,43 @@ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
static struct pci_device_id kvaser_pciefd_id_table[] = {
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
},
{
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
},
{
0,
@@ -783,8 +921,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
- can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
- i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
+ can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
@@ -865,20 +1002,37 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
return 0;
}
-static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
- dma_addr_t addr, int offset)
+static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
{
+ void __iomem *serdes_base;
u32 word1, word2;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
+ word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
word2 = addr >> 32;
#else
word1 = addr;
word2 = 0;
#endif
- iowrite32(word1, pcie->reg_base + offset);
- iowrite32(word2, pcie->reg_base + offset + 4);
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(word1, serdes_base);
+ iowrite32(word2, serdes_base + 0x4);
+}
+
+static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ msb = addr >> 32;
+#endif
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
+ iowrite32(lsb, serdes_base);
+ iowrite32(msb, serdes_base + 0x4);
}
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
@@ -889,10 +1043,8 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
/* Disable the DMA */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
- unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
-
pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
KVASER_PCIEFD_DMA_SIZE,
&dma_addr[i],
@@ -903,24 +1055,25 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
KVASER_PCIEFD_DMA_SIZE);
return -ENOMEM;
}
- kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
+ pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
}
/* Reset Rx FIFO, and both DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
/* Empty Rx FIFO */
srb_packet_count =
FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
- ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
+ ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
+ KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
while (srb_packet_count) {
/* Drop current packet in FIFO */
- ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+ ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
srb_packet_count--;
}
- srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
return -EIO;
@@ -928,7 +1081,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
/* Enable the DMA */
iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
- pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
return 0;
}
@@ -937,30 +1090,29 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
u32 version, srb_status, build;
- version = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
+ version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
- build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
+ build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
- srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
return -ENODEV;
}
- pcie->bus_freq = ioread32(pcie->reg_base +
- KVASER_PCIEFD_SYSID_BUSFREQ_REG);
- pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
+ pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
+ pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
pcie->freq_to_ticks_div = pcie->freq / 1000000;
if (pcie->freq_to_ticks_div == 0)
pcie->freq_to_ticks_div = 1;
/* Turn off all loopback functionality */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
+ iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));
return 0;
}
@@ -1430,21 +1582,20 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
- u32 irq;
+ u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
/* Reset DMA buffer 0 */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
}
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
/* Reset DMA buffer 1 */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
}
if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
@@ -1453,7 +1604,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
- iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1479,15 +1630,14 @@ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
- u32 board_irq;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+ u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
int i;
- board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
-
- if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MASK))
+ if (!(board_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & KVASER_PCIEFD_IRQ_SRB)
+ if (board_irq & irq_mask->kcan_rx0)
kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
@@ -1498,7 +1648,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
}
/* Check that mask matches channel (i) IRQ mask */
- if (board_irq & (1 << i))
+ if (board_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
@@ -1525,6 +1675,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
{
int err;
struct kvaser_pciefd *pcie;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ void __iomem *irq_en_base;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -1532,6 +1684,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, pcie);
pcie->pci = pdev;
+ pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
+ irq_mask = pcie->driver_data->irq_mask;
err = pci_enable_device(pdev);
if (err)
@@ -1567,22 +1721,21 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
goto err_teardown_can_ctrls;
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
- pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
- pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- iowrite32(KVASER_PCIEFD_IRQ_ALL_MASK,
- pcie->reg_base + KVASER_PCIEFD_IEN_REG);
-
+ irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
+ iowrite32(irq_mask->all, irq_en_base);
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
err = kvaser_pciefd_reg_candev(pcie);
if (err)
@@ -1592,12 +1745,12 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
err_free_irq:
/* Disable PCI interrupts */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ iowrite32(0, irq_en_base);
free_irq(pcie->pci->irq, pcie);
err_teardown_can_ctrls:
kvaser_pciefd_teardown_can_ctrls(pcie);
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
pci_clear_master(pdev);
err_pci_iounmap:
@@ -1636,8 +1789,8 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
kvaser_pciefd_remove_all_ctrls(pcie);
/* Disable interrupts */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
free_irq(pcie->pci->irq, pcie);
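The kvaser_pciefd changes above key each board variant's register layout and DMA-map routine off the driver_data field of the PCI ID table. A minimal, self-contained sketch of that dispatch pattern follows; the my_* names are illustrative, not the driver's (only the vendor/device IDs come from the patch):

#include <linux/pci.h>

/* Hypothetical per-board description; plays the role of
 * struct kvaser_pciefd_driver_data without copying its contents. */
struct my_board_data {
	u32 srb_offset;	/* base of this board's shared RX buffer block */
};

static const struct my_board_data my_altera_board = { .srb_offset = 0x1f400 };
static const struct my_board_data my_sf2_board = { .srb_offset = 0x121000 };

static const struct pci_device_id my_id_table[] = {
	{ PCI_DEVICE(0x1a07, 0x000d),
	  .driver_data = (kernel_ulong_t)&my_altera_board },
	{ PCI_DEVICE(0x1a07, 0x0012),
	  .driver_data = (kernel_ulong_t)&my_sf2_board },
	{ 0, }
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* The PCI core hands back the matched table entry, so the rest of
	 * the driver can stay layout-agnostic. */
	const struct my_board_data *data =
		(const struct my_board_data *)id->driver_data;

	return data->srb_offset ? 0 : -ENODEV;
}

One const table per silicon family keeps the hot paths branch-free: every register access goes through the offsets resolved once at probe time.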
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c5af92bcc9c9..13fd84b2e2dd 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/ethtool.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -308,6 +309,9 @@ enum m_can_reg {
#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
+/* Hrtimer polling interval */
+#define HRTIMER_POLL_INTERVAL_MS 1
+
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
* and we can save a (potentially slow) bus round trip by combining
* reads and writes to them.
@@ -1414,6 +1418,12 @@ static int m_can_start(struct net_device *dev)
m_can_enable_all_interrupts(cdev);
+ if (!dev->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
return 0;
}
@@ -1568,6 +1578,11 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ if (!dev->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
+
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
@@ -1793,6 +1808,18 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct
+ m_can_classdev, hrtimer);
+
+ m_can_isr(0, cdev->net);
+
+ hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
+
+ return HRTIMER_RESTART;
+}
+
static int m_can_open(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1831,7 +1858,7 @@ static int m_can_open(struct net_device *dev)
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
dev->name, dev);
- } else {
+ } else if (dev->irq) {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
dev);
}
@@ -2027,6 +2054,9 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto clk_disable;
}
+ if (!cdev->net->irq)
+ cdev->hrtimer.function = &hrtimer_callback;
+
ret = m_can_dev_setup(cdev);
if (ret)
goto rx_offload_del;
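When no interrupt line exists, the m_can patch above drives the ISR from a 1 ms hrtimer instead. A self-contained sketch of that polling pattern, where poll_handler() is a hypothetical stand-in for m_can_isr():

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define POLL_INTERVAL_MS	1

static struct hrtimer poll_timer;

static void poll_handler(void)
{
	/* stand-in for m_can_isr(): read IRQ status and service the chip */
}

static enum hrtimer_restart poll_callback(struct hrtimer *timer)
{
	poll_handler();
	/* re-arm relative to now so the 1 ms cadence is kept */
	hrtimer_forward_now(timer, ms_to_ktime(POLL_INTERVAL_MS));
	return HRTIMER_RESTART;
}

static void poll_start(void)
{
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	poll_timer.function = poll_callback;
	hrtimer_start(&poll_timer, ms_to_ktime(POLL_INTERVAL_MS),
		      HRTIMER_MODE_REL_PINNED);
}

static void poll_stop(void)
{
	hrtimer_cancel(&poll_timer);	/* waits for a running callback */
}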
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index a839dc71dc9b..2ac18ac867a4 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/freezer.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -93,6 +94,8 @@ struct m_can_classdev {
int is_peripheral;
struct mram_cfg mcfg[MRAM_CFG_NUM];
+
+ struct hrtimer hrtimer;
};
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv);
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 94dc82644113..cdb28d6a092c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -5,6 +5,7 @@
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+#include <linux/hrtimer.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -82,7 +83,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
void __iomem *mram_addr;
struct phy *transceiver;
- int irq, ret = 0;
+ int irq = 0, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev,
sizeof(struct m_can_plat_priv));
@@ -96,12 +97,24 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
addr = devm_platform_ioremap_resource_byname(pdev, "m_can");
- irq = platform_get_irq_byname(pdev, "int0");
- if (IS_ERR(addr) || irq < 0) {
- ret = -EINVAL;
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto probe_fail;
}
+ if (device_property_present(mcan_class->dev, "interrupts") ||
+ device_property_present(mcan_class->dev, "interrupt-names")) {
+ irq = platform_get_irq_byname(pdev, "int0");
+ if (irq < 0) {
+ ret = irq;
+ goto probe_fail;
+ }
+ } else {
+ dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
+ hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
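The probe change above makes the interrupt optional: the IRQ is only looked up when the firmware node actually describes one, and irq == 0 is left to mean "fall back to polling". A rough sketch of that decision, with an invented helper name:

#include <linux/platform_device.h>
#include <linux/property.h>

/* example_get_irq() is hypothetical; it mirrors the probe logic above. */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = 0;

	if (device_property_present(&pdev->dev, "interrupts") ||
	    device_property_present(&pdev->dev, "interrupt-names")) {
		irq = platform_get_irq_byname(pdev, "int0");
		if (irq < 0)
			return irq;	/* propagate e.g. -EPROBE_DEFER */
	}

	/* 0 means "no IRQ described": the caller enables hrtimer polling */
	return irq;
}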
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index c56e27223e5f..ac86640998a8 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -148,7 +148,7 @@ static void ems_pci_v1_write_reg(const struct sja1000_priv *priv,
static void ems_pci_v1_post_irq(const struct sja1000_priv *priv)
{
- struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+ struct ems_pci_card *card = priv->priv;
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
@@ -168,7 +168,7 @@ static void ems_pci_v2_write_reg(const struct sja1000_priv *priv,
static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
{
- struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+ struct ems_pci_card *card = priv->priv;
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
@@ -186,7 +186,7 @@ static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
{
- struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+ struct ems_pci_card *card = priv->priv;
writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
}
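The ems_pci cleanup works because C converts void * to any object pointer type implicitly, so the explicit casts added nothing; a two-line illustration:

struct ems_pci_card;	/* opaque; the real definition lives in the driver */

static struct ems_pci_card *get_card(void *priv)
{
	return priv;	/* implicit conversion from void *, no cast needed */
}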
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index a0f7bcec719c..39a63b7313a4 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -284,7 +284,7 @@ struct ucan_priv {
*/
spinlock_t echo_skb_lock;
- /* usb device information information */
+ /* usb device information */
u8 intf_index;
u8 in_ep_addr;
u8 out_ep_addr;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 4d3283db3a13..abe58f103043 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -30,6 +30,7 @@
#include <linux/can/error.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#define DRIVER_NAME "xilinx_can"
@@ -200,6 +201,7 @@ struct xcan_devtype_data {
* @can_clk: Pointer to struct clk
* @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
+ * @rstc: Pointer to reset control
*/
struct xcan_priv {
struct can_priv can;
@@ -218,6 +220,7 @@ struct xcan_priv {
struct clk *can_clk;
struct xcan_devtype_data devtype;
struct phy *transceiver;
+ struct reset_control *rstc;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -1799,6 +1802,16 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
+ priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&pdev->dev, "Cannot get CAN reset.\n");
+ ret = PTR_ERR(priv->rstc);
+ goto err_free;
+ }
+
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ goto err_free;
if (devtype->cantype == XAXI_CANFD) {
priv->can.data_bittiming_const =
@@ -1827,7 +1840,7 @@ static int xcan_probe(struct platform_device *pdev)
/* Get IRQ for the device */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_free;
+ goto err_reset;
ndev->irq = ret;
@@ -1843,21 +1856,21 @@ static int xcan_probe(struct platform_device *pdev)
if (IS_ERR(priv->can_clk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
"device clock not found\n");
- goto err_free;
+ goto err_reset;
}
priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
if (IS_ERR(priv->bus_clk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
"bus clock not found\n");
- goto err_free;
+ goto err_reset;
}
transceiver = devm_phy_optional_get(&pdev->dev, NULL);
if (IS_ERR(transceiver)) {
ret = PTR_ERR(transceiver);
dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
- goto err_free;
+ goto err_reset;
}
priv->transceiver = transceiver;
@@ -1904,6 +1917,8 @@ static int xcan_probe(struct platform_device *pdev)
err_disableclks:
pm_runtime_put(priv->dev);
pm_runtime_disable(&pdev->dev);
+err_reset:
+ reset_control_assert(priv->rstc);
err_free:
free_candev(ndev);
err:
@@ -1920,9 +1935,11 @@ err:
static void xcan_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
free_candev(ndev);
}
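xilinx_can now takes an optional reset line: devm_reset_control_get_optional_exclusive() returns NULL (a no-op control) when the device tree has no "resets" property, so one code path serves both bindings. A minimal sketch under that assumption, with an invented function name:

#include <linux/device.h>
#include <linux/reset.h>

static int example_reset_init(struct device *dev,
			      struct reset_control **rstc)
{
	*rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(*rstc))
		return PTR_ERR(*rstc);	/* real failure, e.g. -EPROBE_DEFER */

	/* reset_control_reset(NULL) is a successful no-op */
	return reset_control_reset(*rstc);
}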
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3464ce5e7470..4e27dc913cf7 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1393,12 +1393,6 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
/* Get the implementation specific capabilities */
if (dev->ops->phylink_get_caps)
dev->ops->phylink_get_caps(dev, port, config);
-
- /* This driver does not make use of the speed, duplex, pause or the
- * advertisement in its mac_config, so it is safe to mark this driver
- * as non-legacy.
- */
- config->legacy_pre_march2020 = false;
}
static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 8b422b298cd5..4d55d8d18376 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/delay.h>
#include <linux/brcmphy.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 5db1ed26f03a..5e39641ea887 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -19,6 +19,7 @@
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index af50001ccdd4..720f4e4ed0b0 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 3e44ccb7db84..5249a1c2a80b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -9,6 +9,7 @@
* Kurt Kanzenbach <[email protected]>
*/
+#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include "hellcreek.h"
#include "hellcreek_ptp.h"
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index ff76444057d2..ee67adeb2cdb 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -8,6 +8,7 @@
#include <linux/regmap.h>
#include <linux/mutex.h>
#include <linux/mii.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
@@ -1290,12 +1291,6 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
}
-
- /* This driver does not make use of the speed, duplex, pause or the
- * advertisement in its mac_config, so it is safe to mark this driver
- * as non-legacy.
- */
- config->legacy_pre_march2020 = false;
}
static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index fd6e2e69a42a..5711a59e2ac9 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -5,6 +5,9 @@
* Copyright (C) 2019 Pengutronix, Michael Grzeschik <[email protected]>
*/
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
#include "ksz8.h"
#include "ksz_common.h"
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b18cd170ec06..2414c1f8bc2d 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -18,8 +18,8 @@
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
@@ -1624,8 +1624,6 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
{
struct ksz_device *dev = ds->priv;
- config->legacy_pre_march2020 = false;
-
if (dev->info->supports_mii[port])
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 1a3d4b692f34..0a6a2fe34e64 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 38b3c6dda386..8fbda739c1b3 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2949,12 +2949,6 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
- /* This driver does not make use of the speed, duplex, pause or the
- * advertisement in its mac_config, so it is safe to mark this driver
- * as non-legacy.
- */
- config->legacy_pre_march2020 = false;
-
priv->info->mac_port_get_caps(ds, port, config);
}
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 1409e691ab77..a9a9651187db 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -9,6 +9,9 @@ mv88e6xxx-objs += global2.o
mv88e6xxx-objs += global2_avb.o
mv88e6xxx-objs += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
+mv88e6xxx-objs += pcs-6185.o
+mv88e6xxx-objs += pcs-6352.o
+mv88e6xxx-objs += pcs-639x.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
mv88e6xxx-objs += port_hidden.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c7d51a539451..bce9c9e43752 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -23,7 +23,7 @@
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mv88e6xxx.h>
@@ -499,81 +499,6 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
}
-static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int lane;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0 && chip->info->ops->serdes_pcs_get_state)
- err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
- state);
- else
- err = -EOPNOTSUPP;
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
-{
- const struct mv88e6xxx_ops *ops = chip->info->ops;
- int lane;
-
- if (ops->serdes_pcs_config) {
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- return ops->serdes_pcs_config(chip, port, lane, mode,
- interface, advertise);
- }
-
- return 0;
-}
-
-static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- const struct mv88e6xxx_ops *ops;
- int err = 0;
- int lane;
-
- ops = chip->info->ops;
-
- if (ops->serdes_pcs_an_restart) {
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- err = ops->serdes_pcs_an_restart(chip, port, lane);
- mv88e6xxx_reg_unlock(chip);
-
- if (err)
- dev_err(ds->dev, "p%d: failed to restart AN\n", port);
- }
-}
-
-static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- unsigned int mode,
- int speed, int duplex)
-{
- const struct mv88e6xxx_ops *ops = chip->info->ops;
- int lane;
-
- if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- return ops->serdes_pcs_link_up(chip, port, lane,
- speed, duplex);
- }
-
- return 0;
-}
-
static const u8 mv88e6185_phy_interface_modes[] = {
[MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
[MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
@@ -853,6 +778,20 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
}
}
+static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t interface)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
+
+ if (chip->info->ops->pcs_ops)
+ pcs = chip->info->ops->pcs_ops->pcs_select(chip, port,
+ interface);
+
+ return pcs;
+}
+
static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface)
{
@@ -889,16 +828,6 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
state->interface);
if (err && err != -EOPNOTSUPP)
goto err_unlock;
-
- err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
- state->interface,
- state->advertising);
- /* FIXME: we should restart negotiation if something changed -
- * which is something we get if we convert to using phylinks
- * PCS operations.
- */
- if (err > 0)
- err = 0;
}
err_unlock:
@@ -982,17 +911,6 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
*/
if (!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) {
- /* FIXME: for an automedia port, should we force the link
- * down here - what if the link comes up due to "other" media
- * while we're bringing the port up, how is the exclusivity
- * handled in the Marvell hardware? E.g. port 2 on 88E6390
- * shared between internal PHY and Serdes.
- */
- err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed,
- duplex);
- if (err)
- goto error;
-
if (ops->port_set_speed_duplex) {
err = ops->port_set_speed_duplex(chip, port,
speed, duplex);
@@ -3163,102 +3081,6 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
return 0;
}
-static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
-{
- struct mv88e6xxx_port *mvp = dev_id;
- struct mv88e6xxx_chip *chip = mvp->chip;
- irqreturn_t ret = IRQ_NONE;
- int port = mvp->port;
- int lane;
-
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- ret = mv88e6xxx_serdes_irq_status(chip, port, lane);
- mv88e6xxx_reg_unlock(chip);
-
- return ret;
-}
-
-static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- struct mv88e6xxx_port *dev_id = &chip->ports[port];
- unsigned int irq;
- int err;
-
- /* Nothing to request if this SERDES port has no IRQ */
- irq = mv88e6xxx_serdes_irq_mapping(chip, port);
- if (!irq)
- return 0;
-
- snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
- "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
-
- /* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
- mv88e6xxx_reg_unlock(chip);
- err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
- IRQF_ONESHOT, dev_id->serdes_irq_name,
- dev_id);
- mv88e6xxx_reg_lock(chip);
- if (err)
- return err;
-
- dev_id->serdes_irq = irq;
-
- return mv88e6xxx_serdes_irq_enable(chip, port, lane);
-}
-
-static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- struct mv88e6xxx_port *dev_id = &chip->ports[port];
- unsigned int irq = dev_id->serdes_irq;
- int err;
-
- /* Nothing to free if no IRQ has been requested */
- if (!irq)
- return 0;
-
- err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
-
- /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */
- mv88e6xxx_reg_unlock(chip);
- free_irq(irq, dev_id);
- mv88e6xxx_reg_lock(chip);
-
- dev_id->serdes_irq = 0;
-
- return err;
-}
-
-static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
- bool on)
-{
- int lane;
- int err;
-
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane < 0)
- return 0;
-
- if (on) {
- err = mv88e6xxx_serdes_power_up(chip, port, lane);
- if (err)
- return err;
-
- err = mv88e6xxx_serdes_irq_request(chip, port, lane);
- } else {
- err = mv88e6xxx_serdes_irq_free(chip, port, lane);
- if (err)
- return err;
-
- err = mv88e6xxx_serdes_power_down(chip, port, lane);
- }
-
- return err;
-}
-
static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
@@ -3322,56 +3144,17 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
- phy_interface_t mode;
struct dsa_port *dp;
- int tx_amp, speed;
+ int tx_amp;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
- dp = dsa_to_port(ds, port);
-
- /* MAC Forcing register: don't force link, speed, duplex or flow control
- * state to any particular values on physical ports, but force the CPU
- * port and all DSA ports to their maximum bandwidth and full duplex.
- */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
- struct phylink_config pl_config = {};
- unsigned long caps;
-
- chip->info->ops->phylink_get_caps(chip, port, &pl_config);
-
- caps = pl_config.mac_capabilities;
-
- if (chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(chip, port);
- else
- mode = PHY_INTERFACE_MODE_NA;
-
- if (caps & MAC_10000FD)
- speed = SPEED_10000;
- else if (caps & MAC_5000FD)
- speed = SPEED_5000;
- else if (caps & MAC_2500FD)
- speed = SPEED_2500;
- else if (caps & MAC_1000)
- speed = SPEED_1000;
- else if (caps & MAC_100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
-
- err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- speed, DUPLEX_FULL,
- PAUSE_OFF, mode);
- } else {
- err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
- SPEED_UNFORCED, DUPLEX_UNFORCED,
- PAUSE_ON,
- PHY_INTERFACE_MODE_NA);
- }
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
+ SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PAUSE_ON, PHY_INTERFACE_MODE_NA);
if (err)
return err;
@@ -3548,6 +3331,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
}
if (chip->info->ops->serdes_set_tx_amplitude) {
+ dp = dsa_to_port(ds, port);
if (dp)
phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
@@ -3621,29 +3405,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
return ret;
}
-static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_serdes_power(chip, port, true);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_serdes_power(chip, port, false))
- dev_err(chip->dev, "failed to power off SERDES\n");
- mv88e6xxx_reg_unlock(chip);
-}
-
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -4106,12 +3867,26 @@ out_mdios:
static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
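+ /* Allocate any SERDES PCS instances for this port before the
+ * devlink regions are created, so phylink can select a PCS as soon
+ * as the port is brought up.
+ */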
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_init) {
+ err = chip->info->ops->pcs_ops->pcs_init(chip, port);
+ if (err)
+ return err;
+ }
+
return mv88e6xxx_setup_devlink_regions_port(ds, port);
}
static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
mv88e6xxx_teardown_devlink_regions_port(ds, port);
+
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_teardown)
+ chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
@@ -4228,15 +4003,13 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6095_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4274,18 +4047,14 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6097_serdes_irq_enable,
- .serdes_irq_status = mv88e6097_serdes_irq_status,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6095_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
@@ -4421,16 +4190,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
@@ -4438,6 +4199,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6341_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -4618,16 +4380,11 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -4723,20 +4480,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4766,9 +4516,6 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -4776,6 +4523,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4826,22 +4574,15 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4891,22 +4632,15 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4954,16 +4688,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
@@ -4971,6 +4697,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -5020,15 +4747,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
@@ -5036,6 +4755,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -5127,16 +4847,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
@@ -5145,6 +4857,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -5289,16 +5002,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5308,6 +5013,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6341_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -5451,15 +5157,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5470,6 +5168,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -5520,16 +5219,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
@@ -5539,6 +5230,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -5589,15 +5281,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
@@ -5607,11 +5292,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
/* MV88E6XXX_FAMILY_6393 */
- .setup_errata = mv88e6393x_serdes_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -5661,20 +5346,14 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
- .serdes_power = mv88e6393x_serdes_power,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6393x_serdes_irq_enable,
- .serdes_irq_status = mv88e6393x_serdes_irq_status,
/* TODO: serdes stats */
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6393x_phylink_get_caps,
+ .pcs_ops = &mv88e6393x_pcs_ops,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -7106,18 +6785,15 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
.phylink_get_caps = mv88e6xxx_get_caps,
- .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
+ .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs,
.phylink_mac_prepare = mv88e6xxx_mac_prepare,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_finish = mv88e6xxx_mac_finish,
- .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
.phylink_mac_link_down = mv88e6xxx_mac_link_down,
.phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
- .port_enable = mv88e6xxx_port_enable,
- .port_disable = mv88e6xxx_port_disable,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
.get_mac_eee = mv88e6xxx_get_mac_eee,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 0ad34b2d8913..44383a03ef2f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -205,6 +205,7 @@ struct mv88e6xxx_irq_ops;
struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
struct mv88e6xxx_ptp_ops;
+struct mv88e6xxx_pcs_ops;
struct mv88e6xxx_irq {
u16 masked;
@@ -285,9 +286,8 @@ struct mv88e6xxx_port {
u8 cmode;
bool mirror_ingress;
bool mirror_egress;
- unsigned int serdes_irq;
- char serdes_irq_name[64];
struct devlink_region *region;
+ void *pcs_private;
/* MacAuth Bypass control flag */
bool mab;
@@ -590,31 +590,12 @@ struct mv88e6xxx_ops {
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
- /* Power on/off a SERDES interface */
- int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up);
-
/* SERDES lane mapping */
int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
- int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
- int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
- int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
- int lane);
- int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
-
/* SERDES interrupt handling */
unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
int port);
- int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
- irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port,
- int lane);
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
@@ -664,6 +645,8 @@ struct mv88e6xxx_ops {
void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config);
+ const struct mv88e6xxx_pcs_ops *pcs_ops;
+
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
};
@@ -736,6 +719,14 @@ struct mv88e6xxx_ptp_ops {
u32 cc_mult_dem;
};
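+/* Per-chip PCS operations: allocate and tear down the phylink_pcs
+ * instances for a port, and select the instance to use for a given
+ * interface mode.
+ */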
+struct mv88e6xxx_pcs_ops {
+ int (*pcs_init)(struct mv88e6xxx_chip *chip, int port);
+ void (*pcs_teardown)(struct mv88e6xxx_chip *chip, int port);
+ struct phylink_pcs *(*pcs_select)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+};
+
#define STATS_TYPE_PORT BIT(0)
#define STATS_TYPE_BANK0 BIT(1)
#define STATS_TYPE_BANK1 BIT(2)
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
new file mode 100644
index 000000000000..4d677f836807
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6185 family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <[email protected]>
+ */
+#include <linux/phylink.h>
+
+#include "global2.h"
+#include "port.h"
+#include "serdes.h"
+
+struct mv88e6185_pcs {
+ struct phylink_pcs phylink_pcs;
+ unsigned int irq;
+ char name[64];
+
+ struct mv88e6xxx_chip *chip;
+ int port;
+};
+
+static struct mv88e6185_pcs *pcs_to_mv88e6185_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e6185_pcs, phylink_pcs);
+}
+
+static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct mv88e6185_pcs *mpcs = dev_id;
+ struct mv88e6xxx_chip *chip;
+ irqreturn_t ret = IRQ_NONE;
+ bool link_up;
+ u16 status;
+ int port;
+ int err;
+
+ chip = mpcs->chip;
+ port = mpcs->port;
+
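+ /* The SERDES IRQ only signals a change; read the port status to
+ * learn the current link state to report to phylink.
+ */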
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (!err) {
+ link_up = !!(status & MV88E6XXX_PORT_STS_LINK);
+
+ phylink_pcs_change(&mpcs->phylink_pcs, link_up);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
+ struct mv88e6xxx_chip *chip = mpcs->chip;
+ int port = mpcs->port;
+ u16 status;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ status = 0;
+
+ state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
+ if (state->link) {
+ state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
+ case MV88E6XXX_PORT_STS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+
+ case MV88E6XXX_PORT_STS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+
+ case MV88E6XXX_PORT_STS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+
+ default:
+ state->link = false;
+ break;
+ }
+ }
+}
+
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static void mv88e6185_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops mv88e6185_phylink_pcs_ops = {
+ .pcs_get_state = mv88e6185_pcs_get_state,
+ .pcs_config = mv88e6185_pcs_config,
+ .pcs_an_restart = mv88e6185_pcs_an_restart,
+};
+
+static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e6185_pcs *mpcs;
+ struct device *dev;
+ unsigned int irq;
+ int err;
+
+ /* There are no configurable serdes lanes on this switch chip, so
+ * we use the static cmode configuration to determine whether we
+ * have a PCS or not.
+ */
+ if (chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_SERDES &&
+ chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_1000BASE_X)
+ return 0;
+
+ dev = chip->dev;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->chip = chip;
+ mpcs->port = port;
+ mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (irq) {
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ err = request_threaded_irq(irq, NULL, mv88e6185_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+ if (err) {
+ kfree(mpcs);
+ return err;
+ }
+
+ mpcs->irq = irq;
+ } else {
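+ /* No IRQ available, so have phylink poll the PCS for link
+ * changes instead.
+ */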
+ mpcs->phylink_pcs.poll = true;
+ }
+
+ chip->ports[port].pcs_private = &mpcs->phylink_pcs;
+
+ return 0;
+}
+
+static void mv88e6185_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e6185_pcs *mpcs;
+
+ mpcs = chip->ports[port].pcs_private;
+ if (!mpcs)
+ return;
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct phylink_pcs *mv88e6185_pcs_select(struct mv88e6xxx_chip *chip,
+ int port,
+ phy_interface_t interface)
+{
+ return chip->ports[port].pcs_private;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops = {
+ .pcs_init = mv88e6185_pcs_init,
+ .pcs_teardown = mv88e6185_pcs_teardown,
+ .pcs_select = mv88e6185_pcs_select,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
new file mode 100644
index 000000000000..88f624b65470
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6352 family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <[email protected]>
+ */
+#include <linux/phylink.h>
+
+#include "global2.h"
+#include "port.h"
+#include "serdes.h"
+
+/* Definitions from drivers/net/phy/marvell.c, which would be good to reuse. */
+#define MII_M1011_PHY_STATUS 17
+#define MII_M1011_IMASK 18
+#define MII_M1011_IMASK_LINK_CHANGE BIT(10)
+#define MII_M1011_IEVENT 19
+#define MII_M1011_IEVENT_LINK_CHANGE BIT(10)
+#define MII_MARVELL_PHY_PAGE 22
+#define MII_MARVELL_FIBER_PAGE 1
+
+struct marvell_c22_pcs {
+ struct mdio_device mdio;
+ struct phylink_pcs phylink_pcs;
+ unsigned int irq;
+ char name[64];
+ bool (*link_check)(struct marvell_c22_pcs *mpcs);
+ struct mv88e6xxx_port *port;
+};
+
+static struct marvell_c22_pcs *pcs_to_marvell_c22_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct marvell_c22_pcs, phylink_pcs);
+}
+
+static int marvell_c22_pcs_set_fiber_page(struct marvell_c22_pcs *mpcs)
+{
+ u16 page;
+ int err;
+
+ mutex_lock(&mpcs->mdio.bus->mdio_lock);
+
+ err = __mdiodev_read(&mpcs->mdio, MII_MARVELL_PHY_PAGE);
+ if (err < 0) {
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't read Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
+ return err;
+ }
+
+ page = err;
+
+ err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
+ MII_MARVELL_FIBER_PAGE);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't set Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
+ return err;
+ }
+
+ return page;
+}
+
+static int marvell_c22_pcs_restore_page(struct marvell_c22_pcs *mpcs,
+ int oldpage, int ret)
+{
+ int err;
+
+ if (oldpage >= 0) {
+ err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
+ oldpage);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't restore Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
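+ /* Preserve the caller's status: a negative ret takes precedence
+ * over a restore failure, and on success return ret unchanged.
+ */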
+ if (!err || ret < 0)
+ err = ret;
+ } else {
+ err = oldpage;
+ }
+ mutex_unlock(&mpcs->mdio.bus->mdio_lock);
+
+ return err;
+}
+
+static irqreturn_t marvell_c22_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct marvell_c22_pcs *mpcs = dev_id;
+ irqreturn_t status = IRQ_NONE;
+ int err, oldpage;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage < 0)
+ goto fail;
+
+ err = __mdiodev_read(&mpcs->mdio, MII_M1011_IEVENT);
+ if (err >= 0 && err & MII_M1011_IEVENT_LINK_CHANGE) {
+ phylink_pcs_change(&mpcs->phylink_pcs, true);
+ status = IRQ_HANDLED;
+ }
+
+fail:
+ marvell_c22_pcs_restore_page(mpcs, oldpage, 0);
+
+ return status;
+}
+
+static int marvell_c22_pcs_modify(struct marvell_c22_pcs *mpcs, u8 reg,
+ u16 mask, u16 val)
+{
+ int oldpage, err = 0;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage >= 0)
+ err = __mdiodev_modify(&mpcs->mdio, reg, mask, val);
+
+ return marvell_c22_pcs_restore_page(mpcs, oldpage, err);
+}
+
+static int marvell_c22_pcs_power(struct marvell_c22_pcs *mpcs,
+ bool on)
+{
+ u16 val = on ? 0 : BMCR_PDOWN;
+
+ return marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_PDOWN, val);
+}
+
+static int marvell_c22_pcs_control_irq(struct marvell_c22_pcs *mpcs,
+ bool enable)
+{
+ u16 val = enable ? MII_M1011_IMASK_LINK_CHANGE : 0;
+
+ return marvell_c22_pcs_modify(mpcs, MII_M1011_IMASK,
+ MII_M1011_IMASK_LINK_CHANGE, val);
+}
+
+static int marvell_c22_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int err;
+
+ err = marvell_c22_pcs_power(mpcs, true);
+ if (err)
+ return err;
+
+ return marvell_c22_pcs_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ marvell_c22_pcs_control_irq(mpcs, false);
+ marvell_c22_pcs_power(mpcs, false);
+}
+
+static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int oldpage, bmsr, lpa, status;
+
+ state->link = false;
+
+ if (mpcs->link_check && !mpcs->link_check(mpcs))
+ return;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage >= 0) {
+ bmsr = __mdiodev_read(&mpcs->mdio, MII_BMSR);
+ lpa = __mdiodev_read(&mpcs->mdio, MII_LPA);
+ status = __mdiodev_read(&mpcs->mdio, MII_M1011_PHY_STATUS);
+ }
+
+ if (marvell_c22_pcs_restore_page(mpcs, oldpage, 0) >= 0 &&
+ bmsr >= 0 && lpa >= 0 && status >= 0)
+ mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa,
+ status, state);
+}
+
+static int marvell_c22_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int oldpage, adv, err, ret = 0;
+ u16 bmcr;
+
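+ /* An interface mode with no C22 advertisement encoding gives us
+ * nothing to program, so treat it as "no change" rather than an
+ * error.
+ */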
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv < 0)
+ return 0;
+
+ bmcr = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ? BMCR_ANENABLE : 0;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage < 0)
+ goto restore;
+
+ err = __mdiodev_modify_changed(&mpcs->mdio, MII_ADVERTISE, 0xffff, adv);
+ ret = err;
+ if (err < 0)
+ goto restore;
+
+ err = __mdiodev_modify_changed(&mpcs->mdio, MII_BMCR, BMCR_ANENABLE,
+ bmcr);
+ if (err < 0) {
+ ret = err;
+ goto restore;
+ }
+
+ /* If the ANENABLE bit was changed, the PHY will restart negotiation,
+ * so we don't need to flag a change to make phylink trigger an AN
+ * restart itself.
+ */
+ if (err)
+ ret = 0;
+
+restore:
+ return marvell_c22_pcs_restore_page(mpcs, oldpage, ret);
+}
+
+static void marvell_c22_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_ANRESTART, BMCR_ANRESTART);
+}
+
+static void marvell_c22_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ u16 bmcr;
+ int err;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ bmcr = mii_bmcr_encode_fixed(speed, duplex);
+
+ err = marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_SPEED100 |
+ BMCR_FULLDPLX | BMCR_SPEED1000, bmcr);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: failed to configure mpcs: %pe\n", mpcs->name,
+ ERR_PTR(err));
+}
+
+static const struct phylink_pcs_ops marvell_c22_pcs_ops = {
+ .pcs_enable = marvell_c22_pcs_enable,
+ .pcs_disable = marvell_c22_pcs_disable,
+ .pcs_get_state = marvell_c22_pcs_get_state,
+ .pcs_config = marvell_c22_pcs_config,
+ .pcs_an_restart = marvell_c22_pcs_an_restart,
+ .pcs_link_up = marvell_c22_pcs_link_up,
+};
+
+static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
+ struct mii_bus *bus,
+ unsigned int addr)
+{
+ struct marvell_c22_pcs *mpcs;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return NULL;
+
+ mpcs->mdio.dev.parent = dev;
+ mpcs->mdio.bus = bus;
+ mpcs->mdio.addr = addr;
+ mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
+ mpcs->phylink_pcs.neg_mode = true;
+
+ return mpcs;
+}
+
+static int marvell_c22_pcs_setup_irq(struct marvell_c22_pcs *mpcs,
+ unsigned int irq)
+{
+ int err;
+
+ mpcs->phylink_pcs.poll = !irq;
+ mpcs->irq = irq;
+
+ if (irq) {
+ err = request_threaded_irq(irq, NULL,
+ marvell_c22_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* mv88e6352 specifics */
+
+static bool mv88e6352_pcs_link_check(struct marvell_c22_pcs *mpcs)
+{
+ struct mv88e6xxx_port *port = mpcs->port;
+ struct mv88e6xxx_chip *chip = port->chip;
+ u8 cmode;
+
+ /* Port 4 can be in auto-media mode. Check that the port is
+ * associated with the mpcs.
+ */
+ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->port_get_cmode(chip, port->port, &cmode);
+ mv88e6xxx_reg_unlock(chip);
+
+ return cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII;
+}
+
+static int mv88e6352_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct marvell_c22_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ unsigned int irq;
+ int err;
+
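+ /* A port without a SERDES (or an error while probing for one) has
+ * no PCS to allocate.
+ */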
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = marvell_c22_pcs_alloc(dev, bus, MV88E6352_ADDR_SERDES);
+ if (!mpcs)
+ return -ENOMEM;
+
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ mpcs->link_check = mv88e6352_pcs_link_check;
+ mpcs->port = &chip->ports[port];
+
+ err = marvell_c22_pcs_setup_irq(mpcs, irq);
+ if (err) {
+ kfree(mpcs);
+ return err;
+ }
+
+ chip->ports[port].pcs_private = &mpcs->phylink_pcs;
+
+ return 0;
+}
+
+static void mv88e6352_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct marvell_c22_pcs *mpcs;
+ struct phylink_pcs *pcs;
+
+ pcs = chip->ports[port].pcs_private;
+ if (!pcs)
+ return;
+
+ mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct phylink_pcs *mv88e6352_pcs_select(struct mv88e6xxx_chip *chip,
+ int port,
+ phy_interface_t interface)
+{
+ return chip->ports[port].pcs_private;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops = {
+ .pcs_init = mv88e6352_pcs_init,
+ .pcs_teardown = mv88e6352_pcs_teardown,
+ .pcs_select = mv88e6352_pcs_select,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
new file mode 100644
index 000000000000..98dd49dac421
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -0,0 +1,898 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6390, 88E6390X and 88E6393X family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <[email protected]>
+ */
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mii.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "phy.h"
+#include "port.h"
+#include "serdes.h"
+
+struct mv88e639x_pcs {
+ struct mdio_device mdio;
+ struct phylink_pcs sgmii_pcs;
+ struct phylink_pcs xg_pcs;
+ bool supports_5g;
+ phy_interface_t interface;
+ unsigned int irq;
+ char name[64];
+ irqreturn_t (*handle_irq)(struct mv88e639x_pcs *mpcs);
+};
+
+static int mv88e639x_read(struct mv88e639x_pcs *mpcs, u16 regnum, u16 *val)
+{
+ int err;
+
+ err = mdiodev_c45_read(&mpcs->mdio, MDIO_MMD_PHYXS, regnum);
+ if (err < 0)
+ return err;
+
+ *val = err;
+
+ return 0;
+}
+
+static int mv88e639x_write(struct mv88e639x_pcs *mpcs, u16 regnum, u16 val)
+{
+ return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, val);
+}
+
+static int mv88e639x_modify(struct mv88e639x_pcs *mpcs, u16 regnum, u16 mask,
+ u16 val)
+{
+ return mdiodev_c45_modify(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, mask,
+ val);
+}
+
+static int mv88e639x_modify_changed(struct mv88e639x_pcs *mpcs, u16 regnum,
+ u16 mask, u16 set)
+{
+ return mdiodev_c45_modify_changed(&mpcs->mdio, MDIO_MMD_PHYXS, regnum,
+ mask, set);
+}
+
+static struct mv88e639x_pcs *
+mv88e639x_pcs_alloc(struct device *dev, struct mii_bus *bus, unsigned int addr,
+ int port)
+{
+ struct mv88e639x_pcs *mpcs;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return NULL;
+
+ mpcs->mdio.dev.parent = dev;
+ mpcs->mdio.bus = bus;
+ mpcs->mdio.addr = addr;
+
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ return mpcs;
+}
+
+static irqreturn_t mv88e639x_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct mv88e639x_pcs *mpcs = dev_id;
+ irqreturn_t (*handler)(struct mv88e639x_pcs *);
+
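+ /* The IRQ is requested at pcs_init() time, but a handler is only
+ * installed once a PCS is enabled, so ignore any interrupt that
+ * arrives before then.
+ */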
+ handler = READ_ONCE(mpcs->handle_irq);
+ if (!handler)
+ return IRQ_NONE;
+
+ return handler(mpcs);
+}
+
+static int mv88e639x_pcs_setup_irq(struct mv88e639x_pcs *mpcs,
+ struct mv88e6xxx_chip *chip, int port)
+{
+ unsigned int irq;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (!irq) {
+ /* Use polling mode */
+ mpcs->sgmii_pcs.poll = true;
+ mpcs->xg_pcs.poll = true;
+ return 0;
+ }
+
+ mpcs->irq = irq;
+
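+ /* The SGMII and 10G PCS instances share the port's single SERDES
+ * IRQ; whichever PCS is enabled installs its handler via handle_irq.
+ */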
+ return request_threaded_irq(irq, NULL, mv88e639x_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+}
+
+static void mv88e639x_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs = chip->ports[port].pcs_private;
+
+ if (!mpcs)
+ return;
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct mv88e639x_pcs *sgmii_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e639x_pcs, sgmii_pcs);
+}
+
+static irqreturn_t mv88e639x_sgmii_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ phylink_pcs_change(&mpcs->sgmii_pcs,
+ int_status & MV88E6390_SGMII_INT_LINK_UP);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e639x_sgmii_pcs_control_irq(struct mv88e639x_pcs *mpcs,
+ bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP;
+
+ return mv88e639x_modify(mpcs, MV88E6390_SGMII_INT_ENABLE,
+ MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP, val);
+}
+
+static int mv88e639x_sgmii_pcs_control_pwr(struct mv88e639x_pcs *mpcs,
+ bool enable)
+{
+ u16 mask, val;
+
+ if (enable) {
+ mask = BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN;
+ val = 0;
+ } else {
+ mask = val = BMCR_PDOWN;
+ }
+
+ return mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, mask, val);
+}
+
+static int mv88e639x_sgmii_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ /* power enable done in post_config */
+ mpcs->handle_irq = mv88e639x_sgmii_handle_irq;
+
+ return mv88e639x_sgmii_pcs_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e639x_sgmii_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_control_irq(mpcs, false);
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+}
+
+static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+}
+
+static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, true);
+
+ return 0;
+}
+
+static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 bmsr, lpa, status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "BMSR", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_LPA, &lpa);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "LPA", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_PHY_STATUS, &status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "status", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa, status,
+ state);
+}
+
+static int mv88e639x_sgmii_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 val, bmcr;
+ bool changed;
+ int adv, err;
+
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv < 0)
+ return 0;
+
+ mpcs->interface = interface;
+
+ err = mv88e639x_modify_changed(mpcs, MV88E6390_SGMII_ADVERTISE,
+ 0xffff, adv);
+ if (err < 0)
+ return err;
+
+ changed = err > 0;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ /* setting ANENABLE triggers a restart of negotiation */
+ if (bmcr == val)
+ return changed;
+
+ return mv88e639x_write(mpcs, MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static void mv88e639x_sgmii_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_ANRESTART, BMCR_ANRESTART);
+}
+
+static void mv88e639x_sgmii_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 bmcr;
+ int err;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ bmcr = mii_bmcr_encode_fixed(speed, duplex);
+
+ err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX,
+ bmcr);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "can't access Serdes PHY %s: %pe\n",
+ "BMCR", ERR_PTR(err));
+}
+
+static const struct phylink_pcs_ops mv88e639x_sgmii_pcs_ops = {
+ .pcs_enable = mv88e639x_sgmii_pcs_enable,
+ .pcs_disable = mv88e639x_sgmii_pcs_disable,
+ .pcs_pre_config = mv88e639x_sgmii_pcs_pre_config,
+ .pcs_post_config = mv88e639x_sgmii_pcs_post_config,
+ .pcs_get_state = mv88e639x_sgmii_pcs_get_state,
+ .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
+ .pcs_config = mv88e639x_sgmii_pcs_config,
+ .pcs_link_up = mv88e639x_sgmii_pcs_link_up,
+};
+
+static struct mv88e639x_pcs *xg_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e639x_pcs, xg_pcs);
+}
+
+static int mv88e639x_xg_pcs_enable(struct mv88e639x_pcs *mpcs)
+{
+ return mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1,
+ MDIO_CTRL1_RESET | MDIO_PCS_CTRL1_LOOPBACK |
+ MDIO_CTRL1_LPOWER, 0);
+}
+
+static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
+{
+ mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1, MDIO_CTRL1_LPOWER,
+ MDIO_CTRL1_LPOWER);
+}
+
+static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ u16 status;
+ int err;
+
+ state->link = false;
+
+ err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "STAT1", ERR_PTR(err));
+ return;
+ }
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
+ state->speed = SPEED_5000;
+ break;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ state->speed = SPEED_10000;
+ break;
+
+ default:
+ state->link = false;
+ return;
+ }
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+static int mv88e639x_xg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
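+/* Hand phylink the SGMII/1000base-X/2500base-X PCS or the 10G PCS,
+ * depending on the interface mode requested for the port.
+ */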
+static struct phylink_pcs *
+mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ struct mv88e639x_pcs *mpcs;
+
+ mpcs = chip->ports[port].pcs_private;
+ if (!mpcs)
+ return NULL;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &mpcs->sgmii_pcs;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ if (!mpcs->supports_5g)
+ return NULL;
+ fallthrough;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return &mpcs->xg_pcs;
+
+ default:
+ return NULL;
+ }
+}
+
+/* Marvell 88E6390 Specific support */
+
+static irqreturn_t mv88e6390_xg_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_10G_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & (MV88E6390_10G_INT_LINK_DOWN |
+ MV88E6390_10G_INT_LINK_UP)) {
+ phylink_pcs_change(&mpcs->xg_pcs,
+ int_status & MV88E6390_10G_INT_LINK_UP);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e6390_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = MV88E6390_10G_INT_LINK_DOWN | MV88E6390_10G_INT_LINK_UP;
+
+ return mv88e639x_modify(mpcs, MV88E6390_10G_INT_ENABLE,
+ MV88E6390_10G_INT_LINK_DOWN |
+ MV88E6390_10G_INT_LINK_UP, val);
+}
+
+static int mv88e6390_xg_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ err = mv88e639x_xg_pcs_enable(mpcs);
+ if (err)
+ return err;
+
+ mpcs->handle_irq = mv88e6390_xg_handle_irq;
+
+ return mv88e6390_xg_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e6390_xg_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e6390_xg_control_irq(mpcs, false);
+ mv88e639x_xg_pcs_disable(mpcs);
+}
+
+static const struct phylink_pcs_ops mv88e6390_xg_pcs_ops = {
+ .pcs_enable = mv88e6390_xg_pcs_enable,
+ .pcs_disable = mv88e6390_xg_pcs_disable,
+ .pcs_get_state = mv88e639x_xg_pcs_get_state,
+ .pcs_config = mv88e639x_xg_pcs_config,
+};
+
+static int mv88e6390_pcs_enable_checker(struct mv88e639x_pcs *mpcs)
+{
+ return mv88e639x_modify(mpcs, MV88E6390_PG_CONTROL,
+ MV88E6390_PG_CONTROL_ENABLE_PC,
+ MV88E6390_PG_CONTROL_ENABLE_PC);
+}
+
+static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ int lane, err;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
+ mpcs->sgmii_pcs.neg_mode = true;
+ mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
+ mpcs->xg_pcs.neg_mode = true;
+
+ err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
+ if (err)
+ goto err_free;
+
+ /* The 6390 and 6390x have the checker; the 6393x doesn't appear to.
+ * This is to enable gathering the statistics. Maybe this should call
+ * out to a helper? Or we could do this at init time.
+ */
+ err = mv88e6390_pcs_enable_checker(mpcs);
+ if (err)
+ goto err_free;
+
+ chip->ports[port].pcs_private = mpcs;
+
+ return 0;
+
+err_free:
+ kfree(mpcs);
+ return err;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops = {
+ .pcs_init = mv88e6390_pcs_init,
+ .pcs_teardown = mv88e639x_pcs_teardown,
+ .pcs_select = mv88e639x_pcs_select,
+};
+
+/* Marvell 88E6393X Specific support */
+
+static int mv88e6393x_power_lane(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_SERDES_CTRL1, val,
+ enable ? 0 : val);
+}
+
+/* mv88e6393x family errata 4.6:
+ * The PwrDn bit on the SERDES cannot be cleared if the device is
+ * configured in CPU_MGD mode or P0_mode is configured for [x]MII.
+ * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
+ *
+ * It seems that after this workaround the SERDES is automatically powered up
+ * (the bit is cleared), so power it down.
+ */
+static int mv88e6393x_erratum_4_6(struct mv88e639x_pcs *mpcs)
+{
+ int err;
+
+ err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_PDOWN |
+ MV88E6393X_SERDES_POC_RESET,
+ MV88E6393X_SERDES_POC_RESET);
+ if (err)
+ return err;
+
+ err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_PDOWN, BMCR_PDOWN);
+ if (err)
+ return err;
+
+ err = mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+ if (err)
+ return err;
+
+ return mv88e6393x_power_lane(mpcs, false);
+}
+
+/* mv88e6393x family errata 4.8:
+ * When a SERDES port is operating in 1000BASE-X or SGMII mode link may not
+ * come up after hardware reset or software reset of SERDES core. Workaround
+ * is to write SERDES register 4.F074.14=1 for only those modes and 0 in all
+ * other modes.
+ */
+static int mv88e6393x_erratum_4_8(struct mv88e639x_pcs *mpcs)
+{
+ u16 reg, poc;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6393X_SERDES_POC, &poc);
+ if (err)
+ return err;
+
+ poc &= MV88E6393X_SERDES_POC_PCS_MASK;
+ if (poc == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
+ poc == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
+ poc == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
+ reg = MV88E6393X_ERRATA_4_8_BIT;
+ else
+ reg = 0;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_ERRATA_4_8_REG,
+ MV88E6393X_ERRATA_4_8_BIT, reg);
+}
+
+/* mv88e6393x family errata 5.2:
+ * For optimal signal integrity the following sequence should be applied to
+ * SERDES operating in 10G mode. These registers only apply to 10G operation
+ * and have no effect on other speeds.
+ */
+static int mv88e6393x_erratum_5_2(struct mv88e639x_pcs *mpcs)
+{
+ static const struct {
+ u16 dev, reg, val, mask;
+ } fixes[] = {
+ { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+ { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+ { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+ { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+ { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+ { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+ { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+ };
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+ err = mdiodev_c45_modify(&mpcs->mdio, fixes[i].dev,
+ fixes[i].reg, fixes[i].mask,
+ fixes[i].val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Inband AN is broken on Amethyst in 2500base-x mode when set by standard
+ * mechanism (via cmode).
+ * We can get around this by configuring the PCS mode to 1000base-x and then
+ * writing value 0x58 to register 1e.8000. (This must be done while the
+ * SerDes receiver and transmitter are disabled, which is the case when
+ * this function is called.)
+ * It seems that when we do this configuration for 2500base-x mode (by
+ * changing the PCS mode to 1000base-x and the frequency to 3.125 GHz from
+ * 1.25 GHz) and then reconfigure to sgmii or 1000base-x, the device thinks
+ * it already has the SerDes at 1.25 GHz and does not change the 1e.8000
+ * register, leaving the SerDes at 3.125 GHz.
+ * To avoid this, change PCS mode back to 2500base-x when disabling SerDes from
+ * 2500base-x mode.
+ */
+static int mv88e6393x_fix_2500basex_an(struct mv88e639x_pcs *mpcs, bool on)
+{
+ u16 reg;
+ int err;
+
+ if (on)
+ reg = MV88E6393X_SERDES_POC_PCS_1000BASEX |
+ MV88E6393X_SERDES_POC_AN;
+ else
+ reg = MV88E6393X_SERDES_POC_PCS_2500BASEX;
+
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_PCS_MASK |
+ MV88E6393X_SERDES_POC_AN |
+ MV88E6393X_SERDES_POC_RESET, reg);
+ if (err)
+ return err;
+
+ return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_VEND1, 0x8000, 0x58);
+}
+
+static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
+ phy_interface_t interface,
+ bool enable)
+{
+ int err;
+
+ if (interface != PHY_INTERFACE_MODE_2500BASEX)
+ return 0;
+
+ err = mv88e6393x_fix_2500basex_an(mpcs, enable);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "failed to %s 2500basex fix: %pe\n",
+ enable ? "enable" : "disable", ERR_PTR(err));
+
+ return err;
+}
+
+static void mv88e6393x_sgmii_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_disable(pcs);
+ mv88e6393x_power_lane(mpcs, false);
+ mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
+}
+
+static void mv88e6393x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_pre_config(pcs, interface);
+ mv88e6393x_power_lane(mpcs, false);
+ mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
+}
+
+static int mv88e6393x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ err = mv88e6393x_erratum_4_8(mpcs);
+ if (err)
+ return err;
+
+ err = mv88e6393x_sgmii_apply_2500basex_an(mpcs, interface, true);
+ if (err)
+ return err;
+
+ err = mv88e6393x_power_lane(mpcs, true);
+ if (err)
+ return err;
+
+ return mv88e639x_sgmii_pcs_post_config(pcs, interface);
+}
+
+static const struct phylink_pcs_ops mv88e6393x_sgmii_pcs_ops = {
+ .pcs_enable = mv88e639x_sgmii_pcs_enable,
+ .pcs_disable = mv88e6393x_sgmii_pcs_disable,
+ .pcs_pre_config = mv88e6393x_sgmii_pcs_pre_config,
+ .pcs_post_config = mv88e6393x_sgmii_pcs_post_config,
+ .pcs_get_state = mv88e639x_sgmii_pcs_get_state,
+ .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
+ .pcs_config = mv88e639x_sgmii_pcs_config,
+ .pcs_link_up = mv88e639x_sgmii_pcs_link_up,
+};
+
+static irqreturn_t mv88e6393x_xg_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status, stat1;
+ bool link_down;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6393X_10G_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & MV88E6393X_10G_INT_LINK_CHANGE) {
+ err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &stat1);
+ if (err)
+ return IRQ_NONE;
+
+ link_down = !(stat1 & MDIO_STAT1_LSTATUS);
+
+ phylink_pcs_change(&mpcs->xg_pcs, !link_down);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e6393x_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = MV88E6393X_10G_INT_LINK_CHANGE;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_10G_INT_ENABLE,
+ MV88E6393X_10G_INT_LINK_CHANGE, val);
+}
+
+static int mv88e6393x_xg_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mpcs->handle_irq = mv88e6393x_xg_handle_irq;
+
+ return mv88e6393x_xg_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e6393x_xg_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e6393x_xg_control_irq(mpcs, false);
+ mv88e639x_xg_pcs_disable(mpcs);
+ mv88e6393x_power_lane(mpcs, false);
+}
+
+/* The PCS has to be powered down while CMODE is changed */
+static void mv88e6393x_xg_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_xg_pcs_disable(mpcs);
+ mv88e6393x_power_lane(mpcs, false);
+}
+
+static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ if (interface == PHY_INTERFACE_MODE_10GBASER) {
+ err = mv88e6393x_erratum_5_2(mpcs);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6393x_power_lane(mpcs, true);
+ if (err)
+ return err;
+
+ return mv88e639x_xg_pcs_enable(mpcs);
+}
+
+static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
+ .pcs_enable = mv88e6393x_xg_pcs_enable,
+ .pcs_disable = mv88e6393x_xg_pcs_disable,
+ .pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
+ .pcs_post_config = mv88e6393x_xg_pcs_post_config,
+ .pcs_get_state = mv88e639x_xg_pcs_get_state,
+ .pcs_config = mv88e639x_xg_pcs_config,
+};
+
+static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ int lane, err;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
+ mpcs->sgmii_pcs.neg_mode = true;
+ mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
+ mpcs->xg_pcs.neg_mode = true;
+ mpcs->supports_5g = true;
+
+ err = mv88e6393x_erratum_4_6(mpcs);
+ if (err)
+ goto err_free;
+
+ err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
+ if (err)
+ goto err_free;
+
+ chip->ports[port].pcs_private = mpcs;
+
+ return 0;
+
+err_free:
+ kfree(mpcs);
+ return err;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops = {
+ .pcs_init = mv88e6393x_pcs_init,
+ .pcs_teardown = mv88e639x_pcs_teardown,
+ .pcs_select = mv88e639x_pcs_select,
+};
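
The ops tables above split the 88E6393X behaviour between the SGMII/1000base-X path and the 10G path while reusing the generic 639x handlers. The pre_config/post_config pairs depend on the ordering phylink guarantees around a major reconfiguration; a minimal sketch of that ordering follows, with a hypothetical helper name, and is not part of this patch:

	static void example_major_config(struct phylink_pcs *pcs,
					 phy_interface_t interface)
	{
		/* phylink invokes pcs_pre_config before the MAC/port
		 * cmode is rewritten; the 6393x uses this to power the
		 * lane down.
		 */
		if (pcs->ops->pcs_pre_config)
			pcs->ops->pcs_pre_config(pcs, interface);

		/* ... the cmode / mac_config update happens here ... */

		/* pcs_post_config then reapplies the errata workarounds
		 * and powers the lane back up.
		 */
		if (pcs->ops->pcs_post_config)
			pcs->ops->pcs_post_config(pcs, interface);
	}
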
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dd66ec902d4c..5394a8cf7bf1 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -524,7 +524,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode, bool force)
{
u16 cmode;
- int lane;
u16 reg;
int err;
@@ -577,19 +576,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (cmode == chip->ports[port].cmode && !force)
return 0;
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0) {
- if (chip->ports[port].serdes_irq) {
- err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
- if (err)
- return err;
- }
-
- err = mv88e6xxx_serdes_power_down(chip, port, lane);
- if (err)
- return err;
- }
-
chip->ports[port].cmode = 0;
if (cmode) {
@@ -605,22 +591,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return err;
chip->ports[port].cmode = cmode;
-
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane == -ENODEV)
- return 0;
- if (lane < 0)
- return lane;
-
- err = mv88e6xxx_serdes_power_up(chip, port, lane);
- if (err)
- return err;
-
- if (chip->ports[port].serdes_irq) {
- err = mv88e6xxx_serdes_irq_enable(chip, port, lane);
- if (err)
- return err;
- }
}
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 80167d53212f..3b4b42651fa3 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -39,15 +39,8 @@ static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
-static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
- int lane, int device, int reg, u16 val)
-{
- return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val);
-}
-
-static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 bmsr, u16 lpa, u16 status,
- struct phylink_link_state *state)
+int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa,
+ u16 status, struct phylink_link_state *state)
{
state->link = false;
@@ -88,7 +81,7 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
state->speed = SPEED_10;
break;
default:
- dev_err(chip->dev, "invalid PHY speed\n");
+ dev_err(dev, "invalid PHY speed\n");
return -EINVAL;
}
} else if (state->link &&
@@ -117,160 +110,6 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
return 0;
}
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (up)
- new_val = val & ~BMCR_PDOWN;
- else
- new_val = val | BMCR_PDOWN;
-
- if (val != new_val)
- err = mv88e6352_serdes_write(chip, MII_BMCR, new_val);
-
- return err;
-}
-
-int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
-{
- u16 adv, bmcr, val;
- bool changed;
- int err;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- adv = 0x0001;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
- break;
-
- default:
- return 0;
- }
-
- err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val);
- if (err)
- return err;
-
- changed = val != adv;
- if (changed) {
- err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv);
- if (err)
- return err;
- }
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (phylink_autoneg_inband(mode))
- bmcr = val | BMCR_ANENABLE;
- else
- bmcr = val & ~BMCR_ANENABLE;
-
- if (bmcr == val)
- return changed;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
-}
-
-int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- u16 bmsr, lpa, status;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
- return err;
- }
-
- err = mv88e6352_serdes_read(chip, 0x11, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
- return err;
- }
-
- err = mv88e6352_serdes_read(chip, MII_LPA, &lpa);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
- return err;
- }
-
- return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
-}
-
-int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u16 bmcr;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr);
- if (err)
- return err;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART);
-}
-
-int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex)
-{
- u16 val, bmcr;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
- switch (speed) {
- case SPEED_1000:
- bmcr |= BMCR_SPEED1000;
- break;
- case SPEED_100:
- bmcr |= BMCR_SPEED100;
- break;
- case SPEED_10:
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
-
- if (bmcr == val)
- return 0;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
-}
-
-int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
-{
- u8 cmode = chip->ports[port].cmode;
- int lane = -ENODEV;
-
- if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII))
- lane = 0xff; /* Unused */
-
- return lane;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -363,51 +202,6 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
-static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
-{
- u16 bmsr;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
-}
-
-irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
- if (err)
- return ret;
-
- if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
- ret = IRQ_HANDLED;
- mv88e6352_serdes_irq_link(chip, port);
- }
-
- return ret;
-}
-
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6352_SERDES_INT_LINK_CHANGE;
-
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val);
-}
-
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
@@ -461,115 +255,6 @@ int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- /* The serdes power can't be controlled on this switch chip but we need
- * to supply this function to avoid returning -EOPNOTSUPP in
- * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down
- */
- return 0;
-}
-
-int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
-{
- /* There are no configurable serdes lanes on this switch chip but we
- * need to return a non-negative lane number so that callers of
- * mv88e6xxx_serdes_get_lane() know this is a serdes port.
- */
- switch (chip->ports[port].cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- int err;
- u16 status;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
- if (err)
- return err;
-
- state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
-
- if (state->link) {
- state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF;
-
- switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
- case MV88E6XXX_PORT_STS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- case MV88E6XXX_PORT_STS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MV88E6XXX_PORT_STS_SPEED_10:
- state->speed = SPEED_10;
- break;
- default:
- dev_err(chip->dev, "invalid PHY speed\n");
- return -EINVAL;
- }
- } else {
- state->duplex = DUPLEX_UNKNOWN;
- state->speed = SPEED_UNKNOWN;
- }
-
- return 0;
-}
-
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- /* The serdes interrupts are enabled in the G2_INT_MASK register. We
- * need to return 0 to avoid returning -EOPNOTSUPP in
- * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable
- */
- switch (cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
-{
- u16 status;
- int err;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
- if (err) {
- dev_err(chip->dev, "can't read port status: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK));
-}
-
-irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- mv88e6097_serdes_irq_link(chip, port);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -690,57 +375,6 @@ int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_CTRL1, &val);
-
- if (err)
- return err;
-
- if (up)
- new_val = val & ~(MDIO_CTRL1_RESET |
- MDIO_PCS_CTRL1_LOOPBACK |
- MDIO_CTRL1_LPOWER);
- else
- new_val = val | MDIO_CTRL1_LPOWER;
-
- if (val != new_val)
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_CTRL1, new_val);
-
- return err;
-}
-
-/* Set power up/down for SGMII and 1000Base-X */
-static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- if (up)
- new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN);
- else
- new_val = val | BMCR_PDOWN;
-
- if (val != new_val)
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, new_val);
-
- return err;
-}
-
struct mv88e6390_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int reg;
@@ -814,484 +448,6 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
-static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane)
-{
- u16 reg;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PG_CONTROL, &reg);
- if (err)
- return err;
-
- reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PG_CONTROL, reg);
-}
-
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- u8 cmode = chip->ports[port].cmode;
- int err;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_power_sgmii(chip, lane, up);
- break;
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- err = mv88e6390_serdes_power_10g(chip, lane, up);
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- if (!err && up)
- err = mv88e6390_serdes_enable_checker(chip, lane);
-
- return err;
-}
-
-int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
-{
- u16 val, bmcr, adv;
- bool changed;
- int err;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- adv = 0x0001;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
- break;
-
- default:
- return 0;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_ADVERTISE, &val);
- if (err)
- return err;
-
- changed = val != adv;
- if (changed) {
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_ADVERTISE, adv);
- if (err)
- return err;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- if (phylink_autoneg_inband(mode))
- bmcr = val | BMCR_ANENABLE;
- else
- bmcr = val & ~BMCR_ANENABLE;
-
- /* setting ANENABLE triggers a restart of negotiation */
- if (bmcr == val)
- return changed;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, bmcr);
-}
-
-static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
- int port, int lane, struct phylink_link_state *state)
-{
- u16 bmsr, lpa, status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
- return err;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_PHY_STATUS, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
- return err;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_LPA, &lpa);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
- return err;
- }
-
- return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
-}
-
-static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
- int port, int lane, struct phylink_link_state *state)
-{
- u16 status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err)
- return err;
-
- state->link = !!(status & MDIO_STAT1_LSTATUS);
- if (state->link) {
- state->speed = SPEED_10000;
- state->duplex = DUPLEX_FULL;
- }
-
- return 0;
-}
-
-static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
- int port, int lane,
- struct phylink_link_state *state)
-{
- u16 status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err)
- return err;
-
- state->link = !!(status & MDIO_STAT1_LSTATUS);
- if (state->link) {
- if (state->interface == PHY_INTERFACE_MODE_5GBASER)
- state->speed = SPEED_5000;
- else
- state->speed = SPEED_10000;
- state->duplex = DUPLEX_FULL;
- }
- return 0;
-}
-
-/* USXGMII registers for Marvell switch 88e639x are undocumented and this function is based
- * on some educated guesses. It appears that there are no status bits related to
- * autonegotiation complete or flow control.
- */
-static int mv88e639x_serdes_pcs_get_state_usxgmii(struct mv88e6xxx_chip *chip,
- int port, int lane,
- struct phylink_link_state *state)
-{
- u16 status, lp_status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_USXGMII_PHY_STATUS, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes USXGMII PHY status: %d\n", err);
- return err;
- }
- dev_dbg(chip->dev, "USXGMII PHY status: 0x%x\n", status);
-
- state->link = !!(status & MDIO_USXGMII_LINK);
- state->an_complete = state->link;
-
- if (state->link) {
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_USXGMII_LP_STATUS, &lp_status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes USXGMII LP status: %d\n", err);
- return err;
- }
- dev_dbg(chip->dev, "USXGMII LP status: 0x%x\n", lp_status);
- /* lp_status appears to include the "link" bit as per USXGMII spec. */
- phylink_decode_usxgmii_word(state, lp_status);
- }
- return 0;
-}
-
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
- state);
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_RXAUI:
- return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane,
- state);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
- state);
- case PHY_INTERFACE_MODE_5GBASER:
- case PHY_INTERFACE_MODE_10GBASER:
- return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane,
- state);
- case PHY_INTERFACE_MODE_USXGMII:
- return mv88e639x_serdes_pcs_get_state_usxgmii(chip, port, lane,
- state);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u16 bmcr;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &bmcr);
- if (err)
- return err;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR,
- bmcr | BMCR_ANRESTART);
-}
-
-int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex)
-{
- u16 val, bmcr;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
- switch (speed) {
- case SPEED_2500:
- case SPEED_1000:
- bmcr |= BMCR_SPEED1000;
- break;
- case SPEED_100:
- bmcr |= BMCR_SPEED100;
- break;
- case SPEED_10:
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
-
- if (bmcr == val)
- return 0;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, bmcr);
-}
-
-static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- u16 bmsr;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
-}
-
-static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
-{
- u16 status;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS));
-}
-
-static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
- int lane, bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_ENABLE, val);
-}
-
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
- }
-
- return 0;
-}
-
-static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
- int lane, u16 *status)
-{
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_STATUS, status);
-
- return err;
-}
-
-static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip,
- u8 lane, bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6393X_10G_INT_LINK_CHANGE;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_10G_INT_ENABLE, val);
-}
-
-int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane, bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- case MV88E6393X_PORT_STS_CMODE_USXGMII:
- return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable);
- }
-
- return 0;
-}
-
-static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip,
- u8 lane, u16 *status)
-{
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_10G_INT_STATUS, status);
-
- return err;
-}
-
-irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
- if (err)
- return ret;
- if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP)) {
- ret = IRQ_HANDLED;
- mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
- }
- break;
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- case MV88E6393X_PORT_STS_CMODE_USXGMII:
- err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status);
- if (err)
- return err;
- if (status & MV88E6393X_10G_INT_LINK_CHANGE) {
- ret = IRQ_HANDLED;
- mv88e6393x_serdes_irq_link_10g(chip, port, lane);
- }
- break;
- }
-
- return ret;
-}
-
-irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
- if (err)
- return ret;
- if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP)) {
- ret = IRQ_HANDLED;
- mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
- }
- }
-
- return ret;
-}
-
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, port);
@@ -1390,259 +546,3 @@ int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
}
-
-static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
- bool on)
-{
- u16 reg;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_CTRL1, &reg);
- if (err)
- return err;
-
- if (on)
- reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
- MV88E6393X_SERDES_CTRL1_RX_PDOWN);
- else
- reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
- MV88E6393X_SERDES_CTRL1_RX_PDOWN;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_CTRL1, reg);
-}
-
-static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
-{
- u16 reg;
- int err;
-
- /* mv88e6393x family errata 4.6:
- * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD
- * mode or P0_mode is configured for [x]MII.
- * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
- *
- * It seems that after this workaround the SERDES is automatically
- * powered up (the bit is cleared), so power it down.
- */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &reg);
- if (err)
- return err;
-
- reg &= ~MV88E6393X_SERDES_POC_PDOWN;
- reg |= MV88E6393X_SERDES_POC_RESET;
-
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, reg);
- if (err)
- return err;
-
- err = mv88e6390_serdes_power_sgmii(chip, lane, false);
- if (err)
- return err;
-
- return mv88e6393x_serdes_power_lane(chip, lane, false);
-}
-
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
-{
- int err;
-
- err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
- if (err)
- return err;
-
- err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
- if (err)
- return err;
-
- return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
-}
-
-static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
-{
- u16 reg, pcs;
- int err;
-
- /* mv88e6393x family errata 4.8:
- * When a SERDES port is operating in 1000BASE-X or SGMII mode link may
- * not come up after hardware reset or software reset of SERDES core.
- * Workaround is to write SERDES register 4.F074.14=1 for only those
- * modes and 0 in all other modes.
- */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &pcs);
- if (err)
- return err;
-
- pcs &= MV88E6393X_SERDES_POC_PCS_MASK;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_ERRATA_4_8_REG, &reg);
- if (err)
- return err;
-
- if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
- pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
- pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
- reg |= MV88E6393X_ERRATA_4_8_BIT;
- else
- reg &= ~MV88E6393X_ERRATA_4_8_BIT;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_ERRATA_4_8_REG, reg);
-}
-
-static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
- u8 cmode)
-{
- static const struct {
- u16 dev, reg, val, mask;
- } fixes[] = {
- { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
- { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
- { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
- { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
- { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
- { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
- { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
- MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
- };
- int err, i;
- u16 reg;
-
- /* mv88e6393x family errata 5.2:
- * For optimal signal integrity the following sequence should be applied
- * to SERDES operating in 10G mode. These registers only apply to 10G
- * operation and have no effect on other speeds.
- */
- if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER &&
- cmode != MV88E6393X_PORT_STS_CMODE_USXGMII)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
- err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
- fixes[i].reg, &reg);
- if (err)
- return err;
-
- reg &= ~fixes[i].mask;
- reg |= fixes[i].val;
-
- err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
- fixes[i].reg, reg);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
- int lane, u8 cmode, bool on)
-{
- u16 reg;
- int err;
-
- if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return 0;
-
- /* Inband AN is broken on Amethyst in 2500base-x mode when set by
- * standard mechanism (via cmode).
- * We can get around this by configuring the PCS mode to 1000base-x
- * and then writing value 0x58 to register 1e.8000. (This must be done
- * while SerDes receiver and transmitter are disabled, which is, when
- * this function is called.)
- * It seem that when we do this configuration to 2500base-x mode (by
- * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
- * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
- * thinks that it already has SerDes at 1.25 GHz and does not change
- * the 1e.8000 register, leaving SerDes at 3.125 GHz.
- * To avoid this, change PCS mode back to 2500base-x when disabling
- * SerDes from 2500base-x mode.
- */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &reg);
- if (err)
- return err;
-
- reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
- if (on)
- reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
- MV88E6393X_SERDES_POC_AN;
- else
- reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
- reg |= MV88E6393X_SERDES_POC_RESET;
-
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, reg);
- if (err)
- return err;
-
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
- if (err)
- return err;
-
- return 0;
-}
-
-int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on)
-{
- u8 cmode = chip->ports[port].cmode;
- int err;
-
- if (port != 0 && port != 9 && port != 10)
- return -EOPNOTSUPP;
-
- if (on) {
- err = mv88e6393x_serdes_erratum_4_8(chip, lane);
- if (err)
- return err;
-
- err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
- if (err)
- return err;
-
- err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
- true);
- if (err)
- return err;
-
- err = mv88e6393x_serdes_power_lane(chip, lane, true);
- if (err)
- return err;
- }
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_power_sgmii(chip, lane, on);
- break;
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- case MV88E6393X_PORT_STS_CMODE_USXGMII:
- err = mv88e6390_serdes_power_10g(chip, lane, on);
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- if (err)
- return err;
-
- if (!on) {
- err = mv88e6393x_serdes_power_lane(chip, lane, false);
- if (err)
- return err;
-
- err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
- false);
- }
-
- return err;
-}
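
Every clause-22 power helper removed above follows the same BMCR read-modify-write pattern; in the new pcs-6352.c/pcs-639x.c code this collapses onto the mdio_device accessors. A hedged sketch, with a hypothetical function name:

	static int example_pcs_power(struct mdio_device *mdiodev, bool up)
	{
		/* Set BMCR_PDOWN to power the PCS down, clear it to
		 * power back up; mdiodev_modify() performs the
		 * read-modify-write and skips the bus write when
		 * nothing changes.
		 */
		return mdiodev_modify(mdiodev, MII_BMCR, BMCR_PDOWN,
				      up ? 0 : BMCR_PDOWN);
	}
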
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index e245687ddb1d..aac95cab46e3 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -12,6 +12,8 @@
#include "chip.h"
+struct phylink_link_state;
+
#define MV88E6352_ADDR_SERDES 0x0f
#define MV88E6352_SERDES_PAGE_FIBER 0x01
#define MV88E6352_SERDES_IRQ 0x0b
@@ -44,6 +46,10 @@
/* 10GBASE-R and 10GBASE-X4/X2 */
#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
+#define MV88E6390_10G_INT_ENABLE 0x9001
+#define MV88E6390_10G_INT_LINK_DOWN BIT(3)
+#define MV88E6390_10G_INT_LINK_UP BIT(2)
+#define MV88E6390_10G_INT_STATUS 0x9003
#define MV88E6393X_10G_INT_ENABLE 0x9000
#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2)
#define MV88E6393X_10G_INT_STATUS 0x9001
@@ -107,65 +113,17 @@
#define MV88E6393X_ERRATA_4_8_REG 0xF074
#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
-int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa,
+ u16 status, struct phylink_link_state *state);
+
int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
-int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
-int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
-int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up);
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip);
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane, bool enable);
-irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
@@ -195,24 +153,6 @@ static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
return chip->info->ops->serdes_get_lane(chip, port);
}
-static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_power)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_power(chip, port, lane, true);
-}
-
-static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_power)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_power(chip, port, lane, false);
-}
-
static inline unsigned int
mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
@@ -222,31 +162,9 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
return chip->info->ops->serdes_irq_mapping(chip, port);
}
-static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_enable)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_irq_enable(chip, port, lane, true);
-}
-
-static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_enable)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_irq_enable(chip, port, lane, false);
-}
-
-static inline irqreturn_t
-mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_status)
- return IRQ_NONE;
-
- return chip->info->ops->serdes_irq_status(chip, port, lane);
-}
+extern const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops;
#endif
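
With the per-operation serdes hooks gone from the header, a chip now selects its whole PCS implementation through a single table. A sketch of the expected hookup, assuming the pcs_ops member this series adds to the chip ops structure in chip.h:

	static const struct mv88e6xxx_ops example_chip_ops = {
		/* ... port, stats and other ops ... */
		.pcs_ops = &mv88e6393x_pcs_ops,
	};
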
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 8da46d284e35..fd7eb4a52918 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1042,12 +1042,6 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
- /* This driver does not make use of the speed, duplex, pause or the
- * advertisement in its mac_config, so it is safe to mark this driver
- * as non-legacy.
- */
- config->legacy_pre_march2020 = false;
-
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 1c113957fcf4..dce3548dcd05 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -16,6 +16,7 @@
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/time.h>
#include "felix.h"
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 15003b2af264..8f912bda120b 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -2,13 +2,14 @@
/* Distributed Switch Architecture VSC9953 driver
* Copyright (C) 2020, Maxim Kochetkov <[email protected]>
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/pcs-lynx.h>
#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 3b0937031499..8d9d271ac3af 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1012,7 +1012,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.wr_table = &ar9331_register_set,
.rd_table = &ar9331_register_set,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct regmap_bus ar9331_sw_bus = {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index efe9380d4a15..47b9f5bf98fb 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1400,8 +1400,6 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
-
- config->legacy_pre_march2020 = false;
}
static void
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 1261e0bb21ef..e8c16e76e34b 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/property.h>
#include <linux/regmap.h>
#include <net/dsa.h>
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 5a8fe707ca25..4310e7793e58 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -20,7 +20,7 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 1b447d96b9c4..c2bd8bb6c9c2 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 3529a565b4aa..331bb1c6676a 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/of_device.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
@@ -1396,12 +1395,6 @@ static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
struct sja1105_xmii_params_entry *mii;
phy_interface_t phy_mode;
- /* This driver does not make use of the speed, duplex, pause or the
- * advertisement in its mac_config, so it is safe to mark this driver
- * as non-legacy.
- */
- config->legacy_pre_march2020 = false;
-
phy_mode = priv->phy_mode[port];
if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index ef1a4a7c47b2..4f09e7438f3b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index fa622639d640..753fef757f11 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -7,7 +7,7 @@
#include <net/dsa.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 5dfc751572ed..220400a633f5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -93,7 +93,7 @@ static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
int err;
err = hw_atl2_utils_soft_reset(self);
@@ -378,8 +378,8 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ struct hw_atl2_priv *priv = self->priv;
u16 action;
u8 index;
int i;
@@ -433,7 +433,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
u16 off_action = (!promisc &&
!hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u8 index;
index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
@@ -445,7 +445,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
bool vlan_promisc_enable;
u8 index;
@@ -539,8 +539,8 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ struct hw_atl2_priv *priv = self->priv;
u8 base_index, count;
int err;
@@ -770,7 +770,7 @@ static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
struct aq_rx_filter_vlan *aq_vlans)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u32 queue;
u8 index;
int i;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 674683b54304..52e2070a4a2f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -413,8 +413,8 @@ do { \
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl2_priv *priv = self->priv;
struct statistics_s stats;
struct version_s version;
int err;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 948586bf1b5b..75ca3ddda1f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -255,4 +255,16 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BCMASP
+ tristate "Broadcom ASP 2.0 Ethernet support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ depends on OF
+ select MII
+ select PHYLIB
+ select MDIO_BCM_UNIMAC
+ help
+ This configuration enables the Broadcom ASP 2.0 Ethernet controller
+ driver which is present in Broadcom STB SoCs such as 72165.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..bac5cb6ad0cd 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+obj-$(CONFIG_BCMASP) += asp2/
diff --git a/drivers/net/ethernet/broadcom/asp2/Makefile b/drivers/net/ethernet/broadcom/asp2/Makefile
new file mode 100644
index 000000000000..e07550315f83
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMASP) += bcm-asp.o
+bcm-asp-objs := bcmasp.o bcmasp_intf.o bcmasp_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
new file mode 100644
index 000000000000..a9984efff6d1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB ASP 2.0 Driver
+ *
+ * Copyright (c) 2023 Broadcom
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
+ priv->irq_mask &= ~mask;
+}
+
+static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
+ priv->irq_mask |= mask;
+}
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
+
+static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
+{
+ _intr2_mask_set(priv, 0xffffffff);
+ priv->irq_mask = 0xffffffff;
+}
+
+static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
+{
+ intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
+{
+ if (status & ASP_INTR2_RX_ECH(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->rx_napi))) {
+ bcmasp_enable_rx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->rx_napi);
+ }
+ }
+
+ if (status & ASP_INTR2_TX_DESC(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->tx_napi))) {
+ bcmasp_enable_tx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->tx_napi);
+ }
+ }
+}
+
+static irqreturn_t bcmasp_isr(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ struct bcmasp_intf *intf;
+ u32 status;
+
+ status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
+ ~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);
+
+ intr2_core_wl(priv, status, ASP_INTR2_CLEAR);
+
+ if (unlikely(status == 0)) {
+ dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle interfaces */
+ list_for_each_entry(intf, &priv->intfs, list)
+ bcmasp_intr2_handling(intf, status);
+
+ return IRQ_HANDLED;
+}
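+
+/* The RX/TX poll functions live in bcmasp_intf.c; on completion they
+ * unmask what bcmasp_intr2_handling() masked above, along the lines of
+ * this hedged sketch:
+ *
+ *	if (work_done < budget && napi_complete_done(napi, work_done))
+ *		bcmasp_enable_rx_irq(intf, 1);
+ */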
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 mask;
+
+ switch (intf->port) {
+ case 0:
+ mask = ASP_CTRL_UMAC0_FLUSH_MASK;
+ break;
+ case 1:
+ mask = ASP_CTRL_UMAC1_FLUSH_MASK;
+ break;
+ case 2:
+ mask = ASP_CTRL_SPB_FLUSH_MASK;
+ break;
+ default:
+ /* Invalid port */
+ return;
+ }
+
+ rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+}
+
+static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
+ ASP_RX_FILTER_NET_OFFSET_L4(32),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
+}
+
+#define MAX_WAKE_FILTER_SIZE 256
+enum asp_netfilt_reg_type {
+ ASP_NETFILT_MATCH = 0,
+ ASP_NETFILT_MASK,
+ ASP_NETFILT_MAX
+};
+
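+/* Each network filter consumes two hardware slots (hw_index and
+ * hw_index + 1); the 256-byte match/mask space is decoded below into
+ * 32-byte halves of the L2, L3_0, L3_1 and L4 register blocks.
+ */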
+static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ u32 block_index, filter_sel;
+
+ if (offset < 32) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 64) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 96) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 128) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 160) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 192) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 224) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 256) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index + 1;
+ } else {
+ return -EINVAL;
+ }
+
+ switch (reg_type) {
+ case ASP_NETFILT_MATCH:
+ return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
+ (offset % 32));
+ case ASP_NETFILT_MASK:
+ return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
+ (offset % 32));
+ default:
+ return -EINVAL;
+ }
+}
+
+static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 val, u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4-byte-aligned writes */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ rx_filter_core_wl(priv, val, reg_offset);
+}
+
+static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4-byte-aligned accesses */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return 0;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ return rx_filter_core_rl(priv, reg_offset);
+}
+
+static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ u32 offset, void *match, void *mask,
+ size_t size)
+{
+ u32 shift, mask_val = 0, match_val = 0;
+ bool first_byte = true;
+
+ if ((offset + size) > MAX_WAKE_FILTER_SIZE)
+ return -EINVAL;
+
+ while (size--) {
+ /* The HW only accepts 4-byte-aligned writes, so if we
+ * begin unaligned or fewer than 4 bytes remain, we must
+ * read-modify-write to avoid clobbering the current
+ * register state.
+ */
+ if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
+ match_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MATCH,
+ ALIGN_DOWN(offset, 4));
+ mask_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MASK,
+ ALIGN_DOWN(offset, 4));
+ }
+
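+ /* Bytes pack big-endian within each 32-bit register:
+ * offset % 4 == 0 lands in bits 31:24 and offset % 4 == 3
+ * in bits 7:0.
+ */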
+ shift = (3 - (offset % 4)) * 8;
+ match_val &= ~GENMASK(shift + 7, shift);
+ mask_val &= ~GENMASK(shift + 7, shift);
+ match_val |= (u32)(*((u8 *)match) << shift);
+ mask_val |= (u32)(*((u8 *)mask) << shift);
+
+ /* If last byte or last byte of word, write to reg */
+ if (!size || ((offset % 4) == 3)) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
+ match_val, ALIGN_DOWN(offset, 4));
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
+ mask_val, ALIGN_DOWN(offset, 4));
+ first_byte = true;
+ } else {
+ first_byte = false;
+ }
+
+ offset++;
+ match++;
+ mask++;
+ }
+
+ return 0;
+}
+
+static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ int i;
+
+ for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
+ }
+}
+
+static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip4_spec *match,
+ struct ethtool_tcpip4_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
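+ /* Frame offsets: ethertype at 2 * ETH_ALEN, IPv4 fields relative
+ * to ETH_HLEN (tos at +1, saddr at +12, daddr at +16), then the
+ * L4 ports at +20/+22, which assumes a minimal IHL == 5 header.
+ */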
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &match->tos, &mask->tos,
+ sizeof(match->tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &match->ip4src, &mask->ip4src,
+ sizeof(match->ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &match->ip4dst, &mask->ip4dst,
+ sizeof(match->ip4dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
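+/* Illustrative note: the ETH_HLEN-relative offsets above follow a
+ * 20 byte IPv4 header, i.e. tos at byte 1, saddr at byte 12, daddr at
+ * byte 16, then the L4 source/destination ports at bytes 20/22.
+ */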
+
+static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip6_spec *match,
+ struct ethtool_tcpip6_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
+ val_16 = htons(ETH_P_IPV6);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ val_16 = htons(match->tclass << 4);
+ mask_16 = htons(mask->tclass << 4);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
+ &match->ip6src, &mask->ip6src,
+ sizeof(match->ip6src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
+ &match->ip6dst, &mask->ip6dst,
+ sizeof(match->ip6dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
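+/* Illustrative note: for a hypothetical tclass of 0xab,
+ * htons(0xab << 4) yields the bytes 0x0a 0xb0, spreading the traffic
+ * class across the two nibbles it occupies after the IPv6 version
+ * field.
+ */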
+
+static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct ethtool_rx_flow_spec *fs = &nfilt->fs;
+ unsigned int offset = 0;
+ __be16 val_16, mask_16;
+ u8 val_8, mask_8;
+
+ /* Currently only supports wake filters */
+ if (!nfilt->wake_filter)
+ return -EINVAL;
+
+ bcmasp_netfilt_reset_hw(priv, nfilt);
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
+ &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+
+ break;
+ case IP_USER_FLOW:
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only a 20 byte IPv4 header is supported: version 4,
+ * IHL 5, hence matching a first byte of 0x45
+ */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_8, &mask_8, sizeof(val_8));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt,
+ ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes)
+ );
+ break;
+ case TCP_V4_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V4_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
+ &fs->m_u.udp_ip4_spec, offset);
+
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case TCP_V6_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V6_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
+ &fs->m_u.udp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ }
+
+ bcmasp_netfilt_hw_en_wake(priv, nfilt);
+
+ return 0;
+}
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ bool write = false;
+ int ret, i;
+
+ /* Write all filters to HW */
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ /* If the filter does not match the port, skip programming. */
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
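+ /* Skip the odd half of a wake filter pair; it is programmed
+ * together with its even half
+ */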
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
+ if (!ret)
+ write = true;
+ }
+
+ /* If at least one wake filter was successfully programmed,
+ * enable the top level wake config
+ */
+ if (write)
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_LNR_MD |
+ ASP_RX_FILTER_GEN_WK_EN |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int j = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
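+ /* Report a wake filter pair as a single rule */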
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ rule_locs[j++] = priv->net_filters[i].fs.location;
+ }
+
+ *rule_cnt = j;
+}
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int cnt = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ /* Skip over a wake filter pair */
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct ethtool_rx_flow_spec *cur;
+ size_t fs_size = 0;
+ int i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ cur = &priv->net_filters[i].fs;
+
+ if (cur->flow_type != fs->flow_type ||
+ cur->ring_cookie != fs->ring_cookie)
+ continue;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ fs_size = sizeof(struct ethhdr);
+ break;
+ case IP_USER_FLOW:
+ fs_size = sizeof(struct ethtool_usrip4_spec);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
+ memcmp(&cur->m_u, &fs->m_u, fs_size))
+ continue;
+
+ if (cur->flow_type & FLOW_EXT) {
+ if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
+ cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
+ cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
+ cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
+ cur->h_ext.data[0] != fs->h_ext.data[0])
+ continue;
+ }
+ if (cur->flow_type & FLOW_MAC_EXT) {
+ if (memcmp(&cur->h_ext.h_dest,
+ &fs->h_ext.h_dest, ETH_ALEN) ||
+ memcmp(&cur->m_ext.h_dest,
+ &fs->m_ext.h_dest, ETH_ALEN))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Return the existing network filter for this location, or claim and
+ * initialize an open one. Returns an ERR_PTR() if neither is available.
+ */
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ int loc, bool wake_filter,
+ bool init)
+{
+ struct bcmasp_net_filter *nfilter = NULL;
+ struct bcmasp_priv *priv = intf->parent;
+ int i, open_index = -1;
+
+ /* Check whether we exceed the filter table capacity */
+ if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ return ERR_PTR(-EINVAL);
+
+ /* If the filter location is busy (already claimed) and we are
+ * initializing the filter (insertion), return a busy error code.
+ */
+ if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
+ return ERR_PTR(-EBUSY);
+
+ /* We need two filters for wake-up, so we cannot use an odd filter */
+ if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
+ return ERR_PTR(-EINVAL);
+
+ /* Initialize the loop index based on the desired location or from 0 */
+ i = loc == RX_CLS_LOC_ANY ? 0 : loc;
+
+ for ( ; i < NUM_NET_FILTERS; i++) {
+ /* Found matching network filter */
+ if (!init &&
+ priv->net_filters[i].claimed &&
+ priv->net_filters[i].hw_index == i &&
+ priv->net_filters[i].port == intf->port)
+ return &priv->net_filters[i];
+
+ /* If we don't need a new filter or new filter already found */
+ if (!init || open_index >= 0)
+ continue;
+
+ /* A wake filter consolidates two filters to cover more bytes.
+ * A wake filter slot is open if...
+ * 1. It is an even filter
+ * 2. Neither the current nor the next filter is claimed
+ */
+ if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
+ !priv->net_filters[i + 1].claimed)
+ open_index = i;
+ else if (!priv->net_filters[i].claimed)
+ open_index = i;
+ }
+
+ if (open_index >= 0) {
+ nfilter = &priv->net_filters[open_index];
+ nfilter->claimed = true;
+ nfilter->port = intf->port;
+ nfilter->hw_index = open_index;
+ }
+
+ if (wake_filter && open_index >= 0) {
+ /* Claim next filter */
+ priv->net_filters[open_index + 1].claimed = true;
+ priv->net_filters[open_index + 1].wake_filter = true;
+ nfilter->wake_filter = true;
+ }
+
+ return nfilter ? nfilter : ERR_PTR(-EINVAL);
+}
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (nfilt->wake_filter) {
+ memset(&priv->net_filters[nfilt->hw_index + 1], 0,
+ sizeof(struct bcmasp_net_filter));
+ }
+
+ memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
+}
+
+static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
+{
+ *high = (u32)(addr[0] << 8 | addr[1]);
+ *low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
+ addr[5]);
+}
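+/* e.g. (illustrative) 00:11:22:33:44:55 packs to high = 0x0011 and
+ * low = 0x22334455, matching the MDA_PAT_H/_L and MDA_MSK_H/_L
+ * register split used below.
+ */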
+
+static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
+ const unsigned char *addr,
+ unsigned char *mask,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 addr_h, addr_l, mask_h, mask_l;
+
+ /* Set local copy */
+ ether_addr_copy(priv->mda_filters[i].mask, mask);
+ ether_addr_copy(priv->mda_filters[i].addr, addr);
+
+ /* Write to HW */
+ bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
+ bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
+ rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
+ rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
+ rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
+ rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
+}
+
+static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (priv->mda_filters[i].en == en)
+ return;
+
+ priv->mda_filters[i].en = en;
+ priv->mda_filters[i].port = intf->port;
+
+ rx_filter_core_wl(priv, ((intf->channel + 8) |
+ (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
+ ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
+ ASP_RX_FILTER_MDA_CFG(i));
+}
+
+/* There are 32 MDA filters shared between all ports, we reserve 4 filters per
+ * port for the following.
+ * - Promisc: Filter to allow all packets when promisc is enabled
+ * - All Multicast
+ * - Broadcast
+ * - Own address
+ *
+ * The reserved filters are identified as follows:
+ * - Promisc: (index * 4) + 0
+ * - All Multicast: (index * 4) + 1
+ * - Broadcast: (index * 4) + 2
+ * - Own address: (index * 4) + 3
+ */
+enum asp_rx_filter_id {
+ ASP_RX_FILTER_MDA_PROMISC = 0,
+ ASP_RX_FILTER_MDA_ALLMULTI,
+ ASP_RX_FILTER_MDA_BROADCAST,
+ ASP_RX_FILTER_MDA_OWN_ADDR,
+ ASP_RX_FILTER_MDA_RES_MAX,
+};
+
+#define ASP_RX_FILT_MDA(intf, name) (((intf)->index * \
+ ASP_RX_FILTER_MDA_RES_MAX) \
+ + ASP_RX_FILTER_MDA_##name)
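+/* e.g. (illustrative) for the second interface (index 1),
+ * ASP_RX_FILT_MDA(intf, BROADCAST) evaluates to (1 * 4) + 2, i.e.
+ * MDA filter 6.
+ */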
+
+static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
+{
+ return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
+}
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
+ unsigned char promisc[ETH_ALEN];
+
+ eth_zero_addr(promisc);
+ /* Set mask to 00:00:00:00:00:00 to match all packets */
+ bcmasp_set_mda_filter(intf, promisc, promisc, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
+{
+ unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);
+
+ /* Set mask to 01:00:00:00:00:00 to match all multicast */
+ bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
+ unsigned char addr[ETH_ALEN];
+
+ eth_broadcast_addr(addr);
+ bcmasp_set_mda_filter(intf, addr, addr, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned int i;
+ int res_count;
+
+ res_count = bcmasp_total_res_mda_cnt(priv);
+
+ /* Disable all filters held by this port */
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ if (priv->mda_filters[i].en &&
+ priv->mda_filters[i].port == intf->port)
+ bcmasp_en_mda_filter(intf, 0, i);
+ }
+}
+
+static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
+ unsigned char *addr, unsigned char *mask,
+ int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u64 addr1, addr2, mask1, mask2, mask3;
+
+ /* Switch to u64 to help with the calculations */
+ addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
+ mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
+ addr2 = ether_addr_to_u64(addr);
+ mask2 = ether_addr_to_u64(mask);
+
+ /* Check if one filter resides within the other */
+ mask3 = mask1 & mask2;
+ if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
+ /* Filter 2 resides within filter 1, so everything is good */
+ return 0;
+ } else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
+ /* Filter 1 resides within filter 2, so swap filters */
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ return 0;
+ }
+
+ /* Unable to combine */
+ return -EINVAL;
+}
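+/* Illustrative example: an existing filter of 01:00:00:00:00:00 with
+ * mask 01:00:00:00:00:00 (all multicast) already covers a specific
+ * multicast address with an all-ones mask, since mask1 & mask2 == mask1
+ * and the masked addresses agree, so the broader filter is kept as-is.
+ */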
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int ret, res_count;
+ unsigned int i;
+
+ res_count = bcmasp_total_res_mda_cnt(priv);
+
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ /* If filter not enabled or belongs to another port skip */
+ if (!priv->mda_filters[i].en ||
+ priv->mda_filters[i].port != intf->port)
+ continue;
+
+ /* Attempt to combine filters */
+ ret = bcmasp_combine_set_filter(intf, addr, mask, i);
+ if (!ret) {
+ intf->mib.filters_combine_cnt++;
+ return 0;
+ }
+ }
+
+ /* Create new filter if possible */
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ if (priv->mda_filters[i].en)
+ continue;
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, 1, i);
+ return 0;
+ }
+
+ /* No room for new filter */
+ return -EINVAL;
+}
+
+static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
+{
+ unsigned int i;
+
+ /* Disable all filters and reset software view since the HW
+ * can lose context while in deep sleep suspend states
+ */
+ for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
+ priv->mda_filters[i].en = 0;
+ }
+
+ for (i = 0; i < NUM_NET_FILTERS; i++)
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
+
+ /* The top level filter enable bit should be set at all times. Also
+ * set GEN_WK_CLR to clear the network filter wake-up, which would
+ * otherwise be sticky
+ */
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_MDA_EN |
+ ASP_RX_FILTER_GEN_WK_CLR |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+/* ASP core initialization */
+static void bcmasp_core_init(struct bcmasp_priv *priv)
+{
+ tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
+ rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
+
+ rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
+ ASP_EDPKT_HDR_CFG);
+ rx_edpkt_core_wl(priv,
+ (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
+ ASP_EDPKT_ENDI);
+
+ rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
+ rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
+ rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
+
+ rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
+
+ /* Disable and clear both UniMACs' wake-up interrupts to avoid
+ * sticky interrupts.
+ */
+ _intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
+ intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
+ ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
+}
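+/* Note: ASP_CTRL_SCRATCH_0 mirrors every CLOCK_CTRL update above so
+ * that bcmasp_core_clock_set_intf() can test the per-port disable bits
+ * from the scratch copy.
+ */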
+
+static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ bcmasp_core_clock_set_ll(priv, clr, set);
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
+{
+ u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned long flags;
+ u32 reg;
+
+ /* When enabling an interface, enable the shared RX and TX clocks if
+ * they were not already enabled. Conversely, when disabling an
+ * interface, if it is the last one enabled, turn off the shared RX and
+ * TX clocks as well. We control per-port disable bits, which is why we
+ * test for equality on the RGMII clock bit mask.
+ */
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ if (en) {
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, intf_mask, 0);
+ } else {
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
+ if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
+ ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, 0, intf_mask);
+ }
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+static irqreturn_t bcmasp_isr_wol(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ u32 status;
+
+ /* No L3 IRQ, so there is nothing to clear */
+ if (priv->wol_irq <= 0)
+ goto irq_handled;
+
+ status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
+ ~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
+ wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
+
+irq_handled:
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ return IRQ_HANDLED;
+}
+
+static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
+{
+ struct platform_device *pdev = priv->pdev;
+ int irq, ret;
+
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
+ pdev->name, priv);
+ if (ret)
+ return ret;
+
+ return irq;
+}
+
+static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = bcmasp_get_and_request_irq(priv, 1);
+ if (irq < 0) {
+ dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
+ return;
+ }
+
+ priv->wol_irq = irq;
+ priv->wol_irq_enabled_mask = 0;
+ device_set_wakeup_capable(&pdev->dev, 1);
+}
+
+static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *dev = &priv->pdev->dev;
+
+ if (en) {
+ if (priv->wol_irq_enabled_mask) {
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ return;
+ }
+
+ /* First enable */
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ enable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 1);
+ } else {
+ if (!priv->wol_irq_enabled_mask)
+ return;
+
+ clear_bit(intf->port, &priv->wol_irq_enabled_mask);
+ if (priv->wol_irq_enabled_mask)
+ return;
+
+ /* Last disable */
+ disable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 0);
+ }
+}
+
+static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+{
+ if (priv->wol_irq > 0)
+ free_irq(priv->wol_irq, priv);
+}
+
+static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct bcmasp_intf *intf;
+ int irq;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
+ if (irq < 0) {
+ dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
+ intf->port, irq);
+ continue;
+ }
+
+ intf->wol_irq = irq;
+ intf->wol_irq_enabled = false;
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
+}
+
+static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
+{
+ struct device *dev = &intf->parent->pdev->dev;
+
+ if (en ^ intf->wol_irq_enabled)
+ irq_set_irq_wake(intf->wol_irq, en);
+
+ intf->wol_irq_enabled = en;
+ device_set_wakeup_enable(dev, en);
+}
+
+static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
+{
+ struct bcmasp_intf *intf;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ if (intf->wol_irq > 0)
+ free_irq(intf->wol_irq, priv);
+ }
+}
+
+static struct bcmasp_hw_info v20_hw_info = {
+ .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
+ .umac2fb = UMAC2FB_OFFSET,
+ .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
+ .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
+ .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
+};
+
+static const struct bcmasp_plat_data v20_plat_data = {
+ .init_wol = bcmasp_init_wol_per_intf,
+ .enable_wol = bcmasp_enable_wol_per_intf,
+ .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .hw_info = &v20_hw_info,
+};
+
+static struct bcmasp_hw_info v21_hw_info = {
+ .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
+ .umac2fb = UMAC2FB_OFFSET_2_1,
+ .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
+ .rx_ctrl_fb_filt_out_frame_count =
+ ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
+ .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
+};
+
+static const struct bcmasp_plat_data v21_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct of_device_id bcmasp_of_match[] = {
+ { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
+ { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_of_match);
+
+static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.1-mdio", },
+ { .compatible = "brcm,asp-v2.0-mdio", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
+
+static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
+{
+ struct bcmasp_intf *intf, *n;
+
+ list_for_each_entry_safe(intf, n, &priv->intfs, list) {
+ list_del(&intf->list);
+ bcmasp_interface_destroy(intf);
+ }
+}
+
+static int bcmasp_probe(struct platform_device *pdev)
+{
+ struct device_node *ports_node, *intf_node;
+ const struct bcmasp_plat_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct bcmasp_priv *priv;
+ struct bcmasp_intf *intf;
+ int ret = 0, count = 0;
+ unsigned int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0)
+ return dev_err_probe(dev, -EINVAL, "invalid interrupt\n");
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "failed to request clock\n");
+
+ /* Base from parent node */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ priv->pdev = pdev;
+ spin_lock_init(&priv->mda_lock);
+ spin_lock_init(&priv->clk_lock);
+ mutex_init(&priv->wol_lock);
+ mutex_init(&priv->net_lock);
+ INIT_LIST_HEAD(&priv->intfs);
+
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
+
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->hw_info = pdata->hw_info;
+
+ /* Enable all clocks to ensure successful probing */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ /* Switch to the main clock */
+ bcmasp_core_clock_select(priv, false);
+
+ bcmasp_intr2_mask_set_all(priv);
+ bcmasp_intr2_clear_all(priv);
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
+ pdev->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
+
+ /* Register mdio child nodes */
+ of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
+
+ /* ASP specific initialization; needs to be done regardless of
+ * how many interfaces come up.
+ */
+ bcmasp_core_init(priv);
+ bcmasp_core_init_filters(priv);
+
+ ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_warn(dev, "No ports found\n");
+ return -EINVAL;
+ }
+
+ i = 0;
+ for_each_available_child_of_node(ports_node, intf_node) {
+ intf = bcmasp_interface_create(priv, intf_node, i);
+ if (!intf) {
+ dev_err(dev, "Cannot create eth interface %d\n", i);
+ ret = -ENOMEM;
+ bcmasp_remove_intfs(priv);
+ of_node_put(intf_node);
+ goto of_put_exit;
+ }
+ list_add_tail(&intf->list, &priv->intfs);
+ i++;
+ }
+
+ /* Check and enable WoL */
+ priv->init_wol(priv);
+
+ /* Drop the clock reference count now and let ndo_open()/ndo_close()
+ * manage it for us from now on.
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+ /* Now do the registration of the network ports which will take care
+ * of managing the clock properly.
+ */
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = register_netdev(intf->ndev);
+ if (ret) {
+ netdev_err(intf->ndev,
+ "failed to register net_device: %d\n", ret);
+ priv->destroy_wol(priv);
+ bcmasp_remove_intfs(priv);
+ goto of_put_exit;
+ }
+ count++;
+ }
+
+ dev_info(dev, "Initialized %d port(s)\n", count);
+
+of_put_exit:
+ of_node_put(ports_node);
+ return ret;
+}
+
+static int bcmasp_remove(struct platform_device *pdev)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ return 0;
+
+ priv->destroy_wol(priv);
+ bcmasp_remove_intfs(priv);
+
+ return 0;
+}
+
+static void bcmasp_shutdown(struct platform_device *pdev)
+{
+ bcmasp_remove(pdev);
+}
+
+static int __maybe_unused bcmasp_suspend(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_suspend(intf);
+ if (ret)
+ break;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Whether Wake-on-LAN is enabled or not, we can always disable
+ * the shared TX clock
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
+
+ bcmasp_core_clock_select(priv, true);
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int __maybe_unused bcmasp_resume(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Switch to the main clock domain */
+ bcmasp_core_clock_select(priv, false);
+
+ /* Re-enable all clocks for re-initialization */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ bcmasp_core_init(priv);
+ bcmasp_core_init_filters(priv);
+
+ /* And disable them to let the network devices take care of them */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_resume(intf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
+ bcmasp_suspend, bcmasp_resume);
+
+static struct platform_driver bcmasp_driver = {
+ .probe = bcmasp_probe,
+ .remove = bcmasp_remove,
+ .shutdown = bcmasp_shutdown,
+ .driver = {
+ .name = "brcm,asp-v2",
+ .of_match_table = bcmasp_of_match,
+ .pm = &bcmasp_pm_ops,
+ },
+};
+module_platform_driver(bcmasp_driver);
+
+MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
+MODULE_ALIAS("platform:brcm,asp-v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
new file mode 100644
index 000000000000..6bfcaa7f95a8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_H
+#define __BCMASP_H
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <uapi/linux/ethtool.h>
+
+#define ASP_INTR2_OFFSET 0x1000
+#define ASP_INTR2_STATUS 0x0
+#define ASP_INTR2_SET 0x4
+#define ASP_INTR2_CLEAR 0x8
+#define ASP_INTR2_MASK_STATUS 0xc
+#define ASP_INTR2_MASK_SET 0x10
+#define ASP_INTR2_MASK_CLEAR 0x14
+
+#define ASP_INTR2_RX_ECH(intr) BIT(intr)
+#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
+#define ASP_INTR2_UMC0_WAKE BIT(22)
+#define ASP_INTR2_UMC1_WAKE BIT(28)
+
+#define ASP_WAKEUP_INTR2_OFFSET 0x1200
+#define ASP_WAKEUP_INTR2_STATUS 0x0
+#define ASP_WAKEUP_INTR2_SET 0x4
+#define ASP_WAKEUP_INTR2_CLEAR 0x8
+#define ASP_WAKEUP_INTR2_MASK_STATUS 0xc
+#define ASP_WAKEUP_INTR2_MASK_SET 0x10
+#define ASP_WAKEUP_INTR2_MASK_CLEAR 0x14
+#define ASP_WAKEUP_INTR2_MPD_0 BIT(0)
+#define ASP_WAKEUP_INTR2_MPD_1 BIT(1)
+#define ASP_WAKEUP_INTR2_FILT_0 BIT(2)
+#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
+#define ASP_WAKEUP_INTR2_FW BIT(4)
+
+#define ASP_TX_ANALYTICS_OFFSET 0x4c000
+#define ASP_TX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_ANALYTICS_OFFSET 0x98000
+#define ASP_RX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_CTRL_OFFSET 0x9f000
+#define ASP_RX_CTRL_UMAC_0_FRAME_COUNT 0x8
+#define ASP_RX_CTRL_UMAC_1_FRAME_COUNT 0xc
+#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
+#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
+#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
+/* asp2.1 diverges offsets here */
+/* ASP2.0 */
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FLUSH 0x28
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
+/* ASP2.1 */
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
+#define ASP_RX_CTRL_FLUSH_2_1 0x30
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+
+#define ASP_RX_FILTER_OFFSET 0x80000
+#define ASP_RX_FILTER_BLK_CTRL 0x0
+#define ASP_RX_FILTER_OPUT_EN BIT(0)
+#define ASP_RX_FILTER_MDA_EN BIT(1)
+#define ASP_RX_FILTER_LNR_MD BIT(2)
+#define ASP_RX_FILTER_GEN_WK_EN BIT(3)
+#define ASP_RX_FILTER_GEN_WK_CLR BIT(4)
+#define ASP_RX_FILTER_NT_FLT_EN BIT(5)
+#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100)
+#define ASP_RX_FILTER_MDA_CFG_EN_SHIFT 8
+#define ASP_RX_FILTER_MDA_CFG_UMC_SEL(sel) ((sel) > 1 ? BIT(17) : \
+ BIT((sel) + 9))
+#define ASP_RX_FILTER_MDA_PAT_H(sel) (((sel) * 0x14) + 0x104)
+#define ASP_RX_FILTER_MDA_PAT_L(sel) (((sel) * 0x14) + 0x108)
+#define ASP_RX_FILTER_MDA_MSK_H(sel) (((sel) * 0x14) + 0x10c)
+#define ASP_RX_FILTER_MDA_MSK_L(sel) (((sel) * 0x14) + 0x110)
+#define ASP_RX_FILTER_NET_CFG(sel) (((sel) * 0xa04) + 0x400)
+#define ASP_RX_FILTER_NET_CFG_CH(sel) ((sel) << 0)
+#define ASP_RX_FILTER_NET_CFG_EN BIT(9)
+#define ASP_RX_FILTER_NET_CFG_L2_EN BIT(10)
+#define ASP_RX_FILTER_NET_CFG_L3_EN BIT(11)
+#define ASP_RX_FILTER_NET_CFG_L4_EN BIT(12)
+#define ASP_RX_FILTER_NET_CFG_L3_FRM(sel) ((sel) << 13)
+#define ASP_RX_FILTER_NET_CFG_L4_FRM(sel) ((sel) << 15)
+#define ASP_RX_FILTER_NET_CFG_UMC(sel) BIT((sel) + 19)
+#define ASP_RX_FILTER_NET_CFG_DMA_EN BIT(27)
+
+#define ASP_RX_FILTER_NET_OFFSET_MAX 32
+#define ASP_RX_FILTER_NET_PAT(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x600)
+#define ASP_RX_FILTER_NET_MASK(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x700)
+
+#define ASP_RX_FILTER_NET_OFFSET(sel) (((sel) * 0xa04) + 0xe00)
+#define ASP_RX_FILTER_NET_OFFSET_L2(val) ((val) << 0)
+#define ASP_RX_FILTER_NET_OFFSET_L3_0(val) ((val) << 8)
+#define ASP_RX_FILTER_NET_OFFSET_L3_1(val) ((val) << 16)
+#define ASP_RX_FILTER_NET_OFFSET_L4(val) ((val) << 24)
+
+enum asp_rx_net_filter_block {
+ ASP_RX_FILTER_NET_L2 = 0,
+ ASP_RX_FILTER_NET_L3_0,
+ ASP_RX_FILTER_NET_L3_1,
+ ASP_RX_FILTER_NET_L4,
+ ASP_RX_FILTER_NET_BLOCK_MAX
+};
+
+#define ASP_EDPKT_OFFSET 0x9c000
+#define ASP_EDPKT_ENABLE 0x4
+#define ASP_EDPKT_ENABLE_EN BIT(0)
+#define ASP_EDPKT_HDR_CFG 0xc
+#define ASP_EDPKT_HDR_SZ_SHIFT 2
+#define ASP_EDPKT_HDR_SZ_32 0
+#define ASP_EDPKT_HDR_SZ_64 1
+#define ASP_EDPKT_HDR_SZ_96 2
+#define ASP_EDPKT_HDR_SZ_128 3
+#define ASP_EDPKT_BURST_BUF_PSCAL_TOUT 0x10
+#define ASP_EDPKT_BURST_BUF_WRITE_TOUT 0x14
+#define ASP_EDPKT_BURST_BUF_READ_TOUT 0x18
+#define ASP_EDPKT_RX_TS_COUNTER 0x38
+#define ASP_EDPKT_ENDI 0x48
+#define ASP_EDPKT_ENDI_DESC_SHIFT 8
+#define ASP_EDPKT_ENDI_NO_BT_SWP 0
+#define ASP_EDPKT_ENDI_BT_SWP_WD 1
+#define ASP_EDPKT_RX_PKT_CNT 0x138
+#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
+#define ASP_EDPKT_HDR_OUT_CNT 0x140
+
+#define ASP_CTRL 0x101000
+#define ASP_CTRL_ASP_SW_INIT 0x04
+#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
+#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
+#define ASP_CTRL_ASP_SW_INIT_AS_RX BIT(2)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC0 BIT(3)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC1 BIT(4)
+#define ASP_CTRL_ASP_SW_INIT_ASP_XMEMIF BIT(5)
+#define ASP_CTRL_CLOCK_CTRL 0x04
+#define ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE BIT(0)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE BIT(1)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT 2
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK (0x7 << ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(x) BIT(ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT + (x))
+#define ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE GENMASK(4, 0)
+#define ASP_CTRL_CORE_CLOCK_SELECT 0x08
+#define ASP_CTRL_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL_SCRATCH_0 0x0c
+
+struct bcmasp_tx_cb {
+ struct sk_buff *skb;
+ unsigned int bytes_sent;
+ bool last;
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct bcmasp_res {
+ /* Per interface resources */
+ /* Port */
+ void __iomem *umac;
+ void __iomem *umac2fb;
+ void __iomem *rgmii;
+
+ /* TX slowpath/configuration */
+ void __iomem *tx_spb_ctrl;
+ void __iomem *tx_spb_top;
+ void __iomem *tx_epkt_core;
+ void __iomem *tx_pause_ctrl;
+};
+
+#define DESC_ADDR(x) ((x) & GENMASK_ULL(39, 0))
+#define DESC_FLAGS(x) ((x) & GENMASK_ULL(63, 40))
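+/* Illustrative: for a 40-bit DMA address addr tagged with DESC_CHKSUM,
+ * buf = DESC_CHKSUM | addr, so DESC_ADDR(buf) recovers the address and
+ * DESC_FLAGS(buf) the flag bits in 63:40.
+ */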
+
+struct bcmasp_desc {
+ u64 buf;
+ #define DESC_CHKSUM BIT_ULL(40)
+ #define DESC_CRC_ERR BIT_ULL(41)
+ #define DESC_RX_SYM_ERR BIT_ULL(42)
+ #define DESC_NO_OCT_ALN BIT_ULL(43)
+ #define DESC_PKT_TRUC BIT_ULL(44)
+ /* 39:0 (TX/RX) bits 0-39 of buf addr
+ * 40 (RX) checksum
+ * 41 (RX) crc_error
+ * 42 (RX) rx_symbol_error
+ * 43 (RX) non_octet_aligned
+ * 44 (RX) pkt_truncated
+ * 45 Reserved
+ * 56:46 (RX) mac_filter_id
+ * 60:57 (RX) rx_port_num (0 - unimac0, 1 - unimac1)
+ * 61 Reserved
+ * 63:62 (TX) forward CRC, overwrite CRC
+ */
+ u32 size;
+ u32 flags;
+ #define DESC_INT_EN BIT(0)
+ #define DESC_SOF BIT(1)
+ #define DESC_EOF BIT(2)
+ #define DESC_EPKT_CMD BIT(3)
+ #define DESC_SCRAM_ST BIT(8)
+ #define DESC_SCRAM_END BIT(9)
+ #define DESC_PCPP BIT(10)
+ #define DESC_PPPP BIT(11)
+ /* 0 (TX) tx_int_en
+ * 1 (TX/RX) SOF
+ * 2 (TX/RX) EOF
+ * 3 (TX) epkt_command
+ * 6:4 (TX) PA
+ * 7 (TX) pause at desc end
+ * 8 (TX) scram_start
+ * 9 (TX) scram_end
+ * 10 (TX) PCPP
+ * 11 (TX) PPPP
+ * 14:12 Reserved
+ * 15 (TX) pid ch Valid
+ * 19:16 (TX) data_pkt_type
+ * 32:20 (TX) pid_channel (RX) nw_filter_id
+ */
+};
+
+struct bcmasp_intf;
+
+struct bcmasp_intf_stats64 {
+ /* Rx Stats */
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t rx_crc_errs;
+ u64_stats_t rx_sym_errs;
+
+ /* Tx Stats */
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+
+ struct u64_stats_sync syncp;
+};
+
+struct bcmasp_mib_counters {
+ u32 edpkt_ts;
+ u32 edpkt_rx_pkt_cnt;
+ u32 edpkt_hdr_ext_cnt;
+ u32 edpkt_hdr_out_cnt;
+ u32 umac_frm_cnt;
+ u32 fb_frm_cnt;
+ u32 fb_rx_fifo_depth;
+ u32 fb_out_frm_cnt;
+ u32 fb_filt_out_frm_cnt;
+ u32 alloc_rx_skb_failed;
+ u32 tx_dma_failed;
+ u32 mc_filters_full_cnt;
+ u32 uc_filters_full_cnt;
+ u32 filters_combine_cnt;
+ u32 promisc_filters_cnt;
+ u32 tx_realloc_offload_failed;
+ u32 tx_timeout_cnt;
+};
+
+struct bcmasp_intf_ops {
+ unsigned long (*rx_desc_read)(struct bcmasp_intf *intf);
+ void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ unsigned long (*tx_read)(struct bcmasp_intf *intf);
+ void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+};
+
+struct bcmasp_priv;
+
+struct bcmasp_intf {
+ struct list_head list;
+ struct net_device *ndev;
+ struct bcmasp_priv *parent;
+
+ /* ASP Ch */
+ int channel;
+ int port;
+ const struct bcmasp_intf_ops *ops;
+
+ /* Used for splitting shared resources */
+ int index;
+
+ struct napi_struct tx_napi;
+ /* TX ring, starts on a new cacheline boundary */
+ void __iomem *tx_spb_dma;
+ int tx_spb_index;
+ int tx_spb_clean_index;
+ struct bcmasp_desc *tx_spb_cpu;
+ dma_addr_t tx_spb_dma_addr;
+ dma_addr_t tx_spb_dma_valid;
+ dma_addr_t tx_spb_dma_read;
+ struct bcmasp_tx_cb *tx_cbs;
+
+ /* RX ring, starts on a new cacheline boundary */
+ void __iomem *rx_edpkt_cfg;
+ void __iomem *rx_edpkt_dma;
+ int rx_edpkt_index;
+ int rx_buf_order;
+ struct bcmasp_desc *rx_edpkt_cpu;
+ dma_addr_t rx_edpkt_dma_addr;
+ dma_addr_t rx_edpkt_dma_read;
+
+ /* RX buffer prefetcher ring */
+ void *rx_ring_cpu;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_ring_dma_valid;
+ struct napi_struct rx_napi;
+
+ struct bcmasp_res res;
+ unsigned int crc_fwd;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct device_node *ndev_dn;
+ phy_interface_t phy_interface;
+ bool internal_phy;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ u32 msg_enable;
+
+ /* Statistics */
+ struct bcmasp_intf_stats64 stats64;
+ struct bcmasp_mib_counters mib;
+
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+ /* Used if per intf wol irq */
+ int wol_irq;
+ unsigned int wol_irq_enabled:1;
+
+ struct ethtool_eee eee;
+};
+
+#define NUM_NET_FILTERS 32
+struct bcmasp_net_filter {
+ struct ethtool_rx_flow_spec fs;
+
+ bool claimed;
+ bool wake_filter;
+
+ int port;
+ unsigned int hw_index;
+};
+
+#define NUM_MDA_FILTERS 32
+struct bcmasp_mda_filter {
+ /* Current owner of this filter */
+ int port;
+ bool en;
+ u8 addr[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+};
+
+struct bcmasp_hw_info {
+ u32 rx_ctrl_flush;
+ u32 umac2fb;
+ u32 rx_ctrl_fb_out_frame_count;
+ u32 rx_ctrl_fb_filt_out_frame_count;
+ u32 rx_ctrl_fb_rx_fifo_depth;
+};
+
+struct bcmasp_plat_data {
+ void (*init_wol)(struct bcmasp_priv *priv);
+ void (*enable_wol)(struct bcmasp_intf *intf, bool en);
+ void (*destroy_wol)(struct bcmasp_priv *priv);
+ struct bcmasp_hw_info *hw_info;
+};
+
+struct bcmasp_priv {
+ struct platform_device *pdev;
+ struct clk *clk;
+
+ int irq;
+ u32 irq_mask;
+
+ /* Used if shared wol irq */
+ struct mutex wol_lock;
+ int wol_irq;
+ unsigned long wol_irq_enabled_mask;
+
+ void (*init_wol)(struct bcmasp_priv *priv);
+ void (*enable_wol)(struct bcmasp_intf *intf, bool en);
+ void (*destroy_wol)(struct bcmasp_priv *priv);
+
+ void __iomem *base;
+ struct bcmasp_hw_info *hw_info;
+
+ struct list_head intfs;
+
+ struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+
+ /* MAC destination address filters lock */
+ spinlock_t mda_lock;
+
+ /* Protects accesses to ASP_CTRL_CLOCK_CTRL */
+ spinlock_t clk_lock;
+
+ struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+
+ /* Network filter lock */
+ struct mutex net_lock;
+};
+
+static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->rx_desc_read(intf);
+}
+
+static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_buffer_write(intf, addr);
+}
+
+static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_desc_write(intf, addr);
+}
+
+static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->tx_read(intf);
+}
+
+static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->tx_write(intf, addr);
+}
+
+#define __BCMASP_IO_MACRO(name, m) \
+static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u32 reg = readl_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wl(struct bcmasp_intf *intf, u32 val, u32 off)\
+{ \
+ writel_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_IO_MACRO(name) __BCMASP_IO_MACRO(name, res.name)
+#define BCMASP_FP_IO_MACRO(name) __BCMASP_IO_MACRO(name, name)
+
+BCMASP_IO_MACRO(umac);
+BCMASP_IO_MACRO(umac2fb);
+BCMASP_IO_MACRO(rgmii);
+BCMASP_FP_IO_MACRO(tx_spb_dma);
+BCMASP_IO_MACRO(tx_spb_ctrl);
+BCMASP_IO_MACRO(tx_spb_top);
+BCMASP_IO_MACRO(tx_epkt_core);
+BCMASP_IO_MACRO(tx_pause_ctrl);
+BCMASP_FP_IO_MACRO(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO(rx_edpkt_cfg);
+
+#define __BCMASP_FP_IO_MACRO_Q(name, m) \
+static inline u64 name##_rq(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u64 reg = readq_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wq(struct bcmasp_intf *intf, u64 val, u32 off)\
+{ \
+ writeq_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_FP_IO_MACRO_Q(name) __BCMASP_FP_IO_MACRO_Q(name, name)
+
+BCMASP_FP_IO_MACRO_Q(tx_spb_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
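+/* Illustrative: BCMASP_FP_IO_MACRO_Q(tx_spb_dma) expands to
+ * tx_spb_dma_rq()/tx_spb_dma_wq() accessors over intf->tx_spb_dma,
+ * complementing the 32-bit _rl/_wl pair generated above.
+ */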
+
+#define PKT_OFFLOAD_NOP (0 << 28)
+#define PKT_OFFLOAD_HDR_OP (1 << 28)
+#define PKT_OFFLOAD_HDR_WRBACK BIT(19)
+#define PKT_OFFLOAD_HDR_COUNT(x) ((x) << 16)
+#define PKT_OFFLOAD_HDR_SIZE_1(x) ((x) << 4)
+#define PKT_OFFLOAD_HDR_SIZE_2(x) (x)
+#define PKT_OFFLOAD_HDR2_SIZE_2(x) ((x) << 24)
+#define PKT_OFFLOAD_HDR2_SIZE_3(x) ((x) << 12)
+#define PKT_OFFLOAD_HDR2_SIZE_4(x) (x)
+#define PKT_OFFLOAD_EPKT_OP (2 << 28)
+#define PKT_OFFLOAD_EPKT_WRBACK BIT(23)
+#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
+#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
+#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
+#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
+#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
+#define PKT_OFFLOAD_EPKT_BLOC(x) (x)
+#define PKT_OFFLOAD_END_OP (7 << 28)
+
+struct bcmasp_pkt_offload {
+ __be32 nop;
+ __be32 header;
+ __be32 header2;
+ __be32 epkt;
+ __be32 end;
+};
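+/* Illustrative sketch (field choices are hypothetical, not the
+ * driver's actual offload programming):
+ *
+ *   offload->nop  = cpu_to_be32(PKT_OFFLOAD_NOP);
+ *   offload->epkt = cpu_to_be32(PKT_OFFLOAD_EPKT_OP |
+ *                               PKT_OFFLOAD_EPKT_CSUM_L2);
+ *   offload->end  = cpu_to_be32(PKT_OFFLOAD_END_OP);
+ *
+ * Each __be32 word carries a 4-bit opcode in bits 31:28 plus
+ * operation-specific fields.
+ */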
+
+#define BCMASP_CORE_IO_MACRO(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+}
+
+BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i);
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf);
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
+
+extern const struct ethtool_ops bcmasp_ethtool_ops;
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf);
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf);
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en);
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask);
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf);
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en);
+
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ int loc, bool wake_filter,
+ bool init);
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs);
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt);
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
+
+void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
+
+void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
+#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
new file mode 100644
index 000000000000..c4f1604d5ab3
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
+
+#include <asm/unaligned.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+enum bcmasp_stat_type {
+ BCMASP_STAT_RX_EDPKT,
+ BCMASP_STAT_RX_CTRL,
+ BCMASP_STAT_RX_CTRL_PER_INTF,
+ BCMASP_STAT_SOFT,
+};
+
+struct bcmasp_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ enum bcmasp_stat_type type;
+ u32 reg_offset;
+};
+
+#define STAT_BCMASP_SOFT_MIB(str) { \
+ .stat_string = str, \
+ .type = BCMASP_STAT_SOFT, \
+}
+
+#define STAT_BCMASP_OFFSET(str, _type, offset) { \
+ .stat_string = str, \
+ .type = _type, \
+ .reg_offset = offset, \
+}
+
+#define STAT_BCMASP_RX_EDPKT(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
+#define STAT_BCMASP_RX_CTRL(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
+#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL_PER_INTF, offset)
+
+/* Must match the order of struct bcmasp_mib_counters */
+static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
+ /* EDPKT counters */
+ STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
+ STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
+ STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
+ STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
+ /* ASP RX control */
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
+ ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Port",
+ ASP_RX_CTRL_FB_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("RX Buffer FIFO Depth",
+ ASP_RX_CTRL_FB_RX_FIFO_DEPTH),
+ STAT_BCMASP_RX_CTRL("Frames Out(Buffer)",
+ ASP_RX_CTRL_FB_OUT_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL("Frames Out(Filters)",
+ ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT),
+ /* Software maintained statistics */
+ STAT_BCMASP_SOFT_MIB("RX SKB Alloc Failed"),
+ STAT_BCMASP_SOFT_MIB("TX DMA Failed"),
+ STAT_BCMASP_SOFT_MIB("Multicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("Unicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("MDA Filters Combined"),
+ STAT_BCMASP_SOFT_MIB("Promisc Filter Set"),
+ STAT_BCMASP_SOFT_MIB("TX Realloc For Offload Failed"),
+ STAT_BCMASP_SOFT_MIB("Tx Timeout Count"),
+};
+
+#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
+
+static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
+ const struct bcmasp_stats *s)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (!strcmp("Frames Out(Buffer)", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_out_frame_count;
+
+ if (!strcmp("Frames Out(Filters)", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
+
+ if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
+
+ return s->reg_offset;
+}
+
+static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMASP_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmasp_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ return;
+ }
+}
+
+static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
+{
+ unsigned int i;
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ const struct bcmasp_stats *s;
+ u32 offset, val;
+ char *p;
+
+ s = &bcmasp_gstrings_stats[i];
+ offset = bcmasp_stat_fixup_offset(intf, s);
+ switch (s->type) {
+ case BCMASP_STAT_SOFT:
+ continue;
+ case BCMASP_STAT_RX_EDPKT:
+ val = rx_edpkt_core_rl(intf->parent, offset);
+ break;
+ case BCMASP_STAT_RX_CTRL:
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ case BCMASP_STAT_RX_CTRL_PER_INTF:
+ offset += sizeof(u32) * intf->port;
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ default:
+ continue;
+ }
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ put_unaligned(val, (u32 *)p);
+ }
+}
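+/* Note: counters land in struct bcmasp_mib_counters by array position,
+ * one u32 per entry, which is why bcmasp_gstrings_stats[] must match
+ * the struct layout exactly.
+ */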
+
+static void bcmasp_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int i;
+ char *p;
+
+ if (netif_running(dev))
+ bcmasp_update_mib_counters(intf);
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcmasp_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "bcmasp", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static u32 bcmasp_get_msglevel(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ return intf->msg_enable;
+}
+
+static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ intf->msg_enable = level;
+}
+
+#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)
+static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ wol->supported = BCMASP_SUPPORTED_WAKE;
+ wol->wolopts = intf->wolopts;
+ memset(wol->sopass, 0, sizeof(wol->sopass));
+
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass));
+}
+
+static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev))
+ return -EOPNOTSUPP;
+
+ /* Interface Specific */
+ intf->wolopts = wol->wolopts;
+ if (intf->wolopts & WAKE_MAGICSECURE)
+ memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
+
+ mutex_lock(&priv->wol_lock);
+ priv->enable_wol(intf, !!intf->wolopts);
+ mutex_unlock(&priv->wol_lock);
+
+ return 0;
+}
+
+static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+ u32 loc = cmd->fs.location;
+ bool wake = false;
+
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ wake = true;
+
+ /* Currently only supports WAKE filters */
+ if (!wake)
+ return -EOPNOTSUPP;
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if filter already exists */
+ if (bcmasp_netfilt_check_dup(intf, &cmd->fs))
+ return -EINVAL;
+
+ nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ /* Return the location where we inserted the filter */
+ cmd->fs.location = nfilter->hw_index;
+ memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec));
+
+ /* Since we only support wake filters, defer register programming till
+ * suspend time.
+ */
+ return 0;
+}
+
+static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ bcmasp_netfilt_release(intf, nfilter);
+
+ return 0;
+}
+
+static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
+
+ cmd->data = NUM_NET_FILTERS;
+
+ return 0;
+}
+
+static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcmasp_flow_insert(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcmasp_flow_delete(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return ret;
+}
+
+static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmasp_netfilt_get_active(intf);
+ /* We support specifying rule locations */
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmasp_flow_get(intf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ cmd->data = NUM_NET_FILTERS;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return err;
+}
+
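+/* Toggle EEE in the UniMAC and mirror the new state into the
+ * interface's cached ethtool_eee state.
+ */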
+void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (enable)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
+
+ intf->eee.eee_enabled = enable;
+ intf->eee.eee_active = enable;
+}
+
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct ethtool_eee *p = &intf->eee;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
+ e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
+
+ return phy_ethtool_get_eee(dev->phydev, e);
+}
+
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct ethtool_eee *p = &intf->eee;
+ int ret;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ /* Latch the requested state before acting on it */
+ p->eee_enabled = e->eee_enabled;
+
+ if (!p->eee_enabled) {
+ bcmasp_eee_enable_set(intf, false);
+ } else {
+ ret = phy_init_eee(dev->phydev, 0);
+ if (ret) {
+ netif_err(intf, hw, dev,
+ "EEE initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ intf->eee.eee_active = ret >= 0;
+ intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
+ bcmasp_eee_enable_set(intf, true);
+ }
+
+ return phy_ethtool_set_eee(dev->phydev, e);
+}
+
+static void bcmasp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ mac_stats->FramesTransmittedOK = umac_rl(intf, UMC_GTPOK);
+ mac_stats->SingleCollisionFrames = umac_rl(intf, UMC_GTSCL);
+ mac_stats->MultipleCollisionFrames = umac_rl(intf, UMC_GTMCL);
+ mac_stats->FramesReceivedOK = umac_rl(intf, UMC_GRPOK);
+ mac_stats->FrameCheckSequenceErrors = umac_rl(intf, UMC_GRFCS);
+ mac_stats->AlignmentErrors = umac_rl(intf, UMC_GRALN);
+ mac_stats->OctetsTransmittedOK = umac_rl(intf, UMC_GTBYT);
+ mac_stats->FramesWithDeferredXmissions = umac_rl(intf, UMC_GTDRF);
+ mac_stats->LateCollisions = umac_rl(intf, UMC_GTLCL);
+ mac_stats->FramesAbortedDueToXSColls = umac_rl(intf, UMC_GTXCL);
+ mac_stats->OctetsReceivedOK = umac_rl(intf, UMC_GRBYT);
+ mac_stats->MulticastFramesXmittedOK = umac_rl(intf, UMC_GTMCA);
+ mac_stats->BroadcastFramesXmittedOK = umac_rl(intf, UMC_GTBCA);
+ mac_stats->FramesWithExcessiveDeferral = umac_rl(intf, UMC_GTEDF);
+ mac_stats->MulticastFramesReceivedOK = umac_rl(intf, UMC_GRMCA);
+ mac_stats->BroadcastFramesReceivedOK = umac_rl(intf, UMC_GRBCA);
+}
+
+static const struct ethtool_rmon_hist_range bcmasp_rmon_ranges[] = {
+ { 0, 64},
+ { 65, 127},
+ { 128, 255},
+ { 256, 511},
+ { 512, 1023},
+ { 1024, 1518},
+ { 1519, 1522},
+ {}
+};
+
+static void bcmasp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ *ranges = bcmasp_rmon_ranges;
+
+ rmon_stats->undersize_pkts = umac_rl(intf, UMC_RRUND);
+ rmon_stats->oversize_pkts = umac_rl(intf, UMC_GROVR);
+ rmon_stats->fragments = umac_rl(intf, UMC_RRFRG);
+ rmon_stats->jabbers = umac_rl(intf, UMC_GRJBR);
+
+ rmon_stats->hist[0] = umac_rl(intf, UMC_GR64);
+ rmon_stats->hist[1] = umac_rl(intf, UMC_GR127);
+ rmon_stats->hist[2] = umac_rl(intf, UMC_GR255);
+ rmon_stats->hist[3] = umac_rl(intf, UMC_GR511);
+ rmon_stats->hist[4] = umac_rl(intf, UMC_GR1023);
+ rmon_stats->hist[5] = umac_rl(intf, UMC_GR1518);
+ rmon_stats->hist[6] = umac_rl(intf, UMC_GRMGV);
+
+ rmon_stats->hist_tx[0] = umac_rl(intf, UMC_TR64);
+ rmon_stats->hist_tx[1] = umac_rl(intf, UMC_TR127);
+ rmon_stats->hist_tx[2] = umac_rl(intf, UMC_TR255);
+ rmon_stats->hist_tx[3] = umac_rl(intf, UMC_TR511);
+ rmon_stats->hist_tx[4] = umac_rl(intf, UMC_TR1023);
+ rmon_stats->hist_tx[5] = umac_rl(intf, UMC_TR1518);
+ rmon_stats->hist_tx[6] = umac_rl(intf, UMC_TRMGV);
+}
+
+static void bcmasp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ ctrl_stats->MACControlFramesTransmitted = umac_rl(intf, UMC_GTXCF);
+ ctrl_stats->MACControlFramesReceived = umac_rl(intf, UMC_GRXCF);
+ ctrl_stats->UnsupportedOpcodesReceived = umac_rl(intf, UMC_GRXUO);
+}
+
+const struct ethtool_ops bcmasp_ethtool_ops = {
+ .get_drvinfo = bcmasp_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = bcmasp_get_msglevel,
+ .set_msglevel = bcmasp_set_msglevel,
+ .get_wol = bcmasp_get_wol,
+ .set_wol = bcmasp_set_wol,
+ .get_rxnfc = bcmasp_get_rxnfc,
+ .set_rxnfc = bcmasp_set_rxnfc,
+ .set_eee = bcmasp_set_eee,
+ .get_eee = bcmasp_get_eee,
+ .get_eth_mac_stats = bcmasp_get_eth_mac_stats,
+ .get_rmon_stats = bcmasp_get_rmon_stats,
+ .get_eth_ctrl_stats = bcmasp_get_eth_ctrl_stats,
+ .get_strings = bcmasp_get_strings,
+ .get_ethtool_stats = bcmasp_get_ethtool_stats,
+ .get_sset_count = bcmasp_get_sset_count,
+};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
new file mode 100644
index 000000000000..53e542881255
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_intf: " fmt
+
+#include <asm/byteorder.h>
+#include <linux/brcmphy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/ptp_classify.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
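+/* Advance a ring index by one entry, wrapping back to zero at ring_count */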
+static int incr_ring(int index, int ring_count)
+{
+ index++;
+ if (index == ring_count)
+ return 0;
+
+ return index;
+}
+
+/* Points to last byte of descriptor */
+static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
+ addr += DESC_SIZE;
+ if (addr > end)
+ return beg + DESC_SIZE - 1;
+
+ return addr;
+}
+
+/* Points to first byte of descriptor */
+static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
+ addr += DESC_SIZE;
+ if (addr >= end)
+ return beg;
+
+ return addr;
+}
+
+static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
+{
+ if (en) {
+ tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
+ TX_EPKT_C_CFG_MISC_PT |
+ (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
+ TX_EPKT_C_CFG_MISC);
+ } else {
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+ }
+}
+
+static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
+{
+ if (en)
+ rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
+ RX_EDPKT_CFG_ENABLE);
+ else
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+}
+
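+/* Rebuild the MDA filter set for this port. If the shared hardware
+ * filter table overflows, fall back to promiscuous mode.
+ */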
+static void bcmasp_set_rx_mode(struct net_device *dev)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int ret;
+
+ spin_lock_bh(&intf->parent->mda_lock);
+
+ bcmasp_disable_all_filters(intf);
+
+ if (dev->flags & IFF_PROMISC)
+ goto set_promisc;
+
+ bcmasp_set_promisc(intf, 0);
+
+ bcmasp_set_broad(intf, 1);
+
+ bcmasp_set_oaddr(intf, dev->dev_addr, 1);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ bcmasp_set_allmulti(intf, 1);
+ } else {
+ bcmasp_set_allmulti(intf, 0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.mc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.uc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+ return;
+
+set_promisc:
+ bcmasp_set_promisc(intf, 1);
+ intf->mib.promisc_filters_cnt++;
+
+ /* disable all filters used by this port */
+ bcmasp_disable_all_filters(intf);
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+}
+
+static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
+{
+ struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
+
+ txcb->skb = NULL;
+ dma_unmap_addr_set(txcb, dma_addr, 0);
+ dma_unmap_len_set(txcb, dma_len, 0);
+ txcb->last = false;
+}
+
+static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
+{
+ int next_index, i;
+
+ /* Check if we have enough room for cnt descriptors by walking the
+ * ring forward from the producer index; reaching the clean index
+ * means the ring would overflow.
+ */
+ next_index = intf->tx_spb_index;
+ for (i = 0; i < cnt; i++) {
+ next_index = incr_ring(next_index, DESC_RING_COUNT);
+ if (next_index == intf->tx_spb_clean_index)
+ return 1;
+ }
+
+ return 0;
+}
+
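+/* Prepend an in-band offload descriptor so the hardware inserts the
+ * L3/L4 checksums. Any packet type the engine cannot handle falls
+ * back to software checksumming via skb_checksum_help().
+ */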
+static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
+ struct sk_buff *skb,
+ bool *csum_hw)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 header = 0, header2 = 0, epkt = 0;
+ struct bcmasp_pkt_offload *offload;
+ unsigned int header_cnt = 0;
+ u8 ip_proto;
+ int ret;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return skb;
+
+ ret = skb_cow_head(skb, sizeof(*offload));
+ if (ret < 0) {
+ intf->mib.tx_realloc_offload_failed++;
+ goto help;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ ip_proto = ip_hdr(skb)->protocol;
+ header_cnt += 2;
+ break;
+ case htons(ETH_P_IPV6):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ header_cnt += 2;
+ break;
+ default:
+ goto help;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ header_cnt++;
+ break;
+ case IPPROTO_UDP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ header_cnt++;
+ break;
+ default:
+ goto help;
+ }
+
+ offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
+
+ header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
+ PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_OP;
+
+ offload->nop = htonl(PKT_OFFLOAD_NOP);
+ offload->header = htonl(header);
+ offload->header2 = htonl(header2);
+ offload->epkt = htonl(epkt);
+ offload->end = htonl(PKT_OFFLOAD_END_OP);
+ *csum_hw = true;
+
+ return skb;
+
+help:
+ skb_checksum_help(skb);
+
+ return skb;
+}
+
+static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
+{
+ return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
+}
+
+static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
+}
+
+static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
+}
+
+static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
+{
+ return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
+}
+
+static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
+}
+
+static const struct bcmasp_intf_ops bcmasp_intf_ops = {
+ .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
+ .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
+ .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
+ .tx_read = bcmasp_tx_spb_dma_rq,
+ .tx_write = bcmasp_tx_spb_dma_wq,
+};
+
+static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int total_bytes, size;
+ int spb_index, nr_frags, i, j;
+ struct bcmasp_tx_cb *txcb;
+ dma_addr_t mapping, valid;
+ struct bcmasp_desc *desc;
+ bool csum_hw = false;
+ struct device *kdev;
+ skb_frag_t *frag;
+
+ kdev = &intf->parent->pdev->dev;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_spb_ring_full(intf, nr_frags + 1)) {
+ netif_stop_queue(dev);
+ if (net_ratelimit())
+ netdev_err(dev, "Tx Ring Full!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Save skb len before adding csum offload header */
+ total_bytes = skb->len;
+ skb = bcmasp_csum_offload(dev, skb, &csum_hw);
+ if (!skb)
+ return NETDEV_TX_OK;
+
+ spb_index = intf->tx_spb_index;
+ valid = intf->tx_spb_dma_valid;
+ for (i = 0; i <= nr_frags; i++) {
+ if (!i) {
+ size = skb_headlen(skb);
+ if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
+ return NETDEV_TX_OK;
+ size = skb->len;
+ }
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_mapping_error(kdev, mapping)) {
+ intf->mib.tx_dma_failed++;
+ spb_index = intf->tx_spb_index;
+ for (j = 0; j < i; j++) {
+ bcmasp_clean_txcb(intf, spb_index);
+ spb_index = incr_ring(spb_index,
+ DESC_RING_COUNT);
+ }
+ /* Rewind so we do not have a hole */
+ spb_index = intf->tx_spb_index;
+ /* Drop the packet instead of leaking it */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txcb = &intf->tx_cbs[spb_index];
+ desc = &intf->tx_spb_cpu[spb_index];
+ memset(desc, 0, sizeof(*desc));
+ txcb->skb = skb;
+ txcb->bytes_sent = total_bytes;
+ dma_unmap_addr_set(txcb, dma_addr, mapping);
+ dma_unmap_len_set(txcb, dma_len, size);
+ if (!i) {
+ desc->flags |= DESC_SOF;
+ if (csum_hw)
+ desc->flags |= DESC_EPKT_CMD;
+ }
+
+ if (i == nr_frags) {
+ desc->flags |= DESC_EOF;
+ txcb->last = true;
+ }
+
+ desc->buf = mapping;
+ desc->size = size;
+ desc->flags |= DESC_INT_EN;
+
+ netif_dbg(intf, tx_queued, dev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ spb_index);
+
+ spb_index = incr_ring(spb_index, DESC_RING_COUNT);
+ valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ /* Ensure all descriptors have been written to DRAM for the
+ * hardware to see up-to-date contents.
+ */
+ wmb();
+
+ intf->tx_spb_index = spb_index;
+ intf->tx_spb_dma_valid = valid;
+ bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
+
+ if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void bcmasp_netif_start(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ bcmasp_set_rx_mode(dev);
+ napi_enable(&intf->tx_napi);
+ napi_enable(&intf->rx_napi);
+
+ bcmasp_enable_rx_irq(intf, 1);
+ bcmasp_enable_tx_irq(intf, 1);
+
+ phy_start(dev->phydev);
+}
+
+static void umac_reset(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x0, UMC_CMD);
+ umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
+ usleep_range(10, 100);
+ umac_wl(intf, 0x0, UMC_CMD);
+}
+
+static void umac_set_hw_addr(struct bcmasp_intf *intf,
+ const unsigned char *addr)
+{
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ umac_wl(intf, mac0, UMC_MAC0);
+ umac_wl(intf, mac1, UMC_MAC1);
+}
+
+static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_CMD);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_wl(intf, reg, UMC_CMD);
+
+ /* UniMAC stops on a packet boundary, wait for a full-sized packet
+ * to be processed (1 msec).
+ */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static void umac_init(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x800, UMC_FRM_LEN);
+ umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
+ umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
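+ /* Run the MAC promiscuous; per-address filtering is done by the
+ * ASP MDA filters programmed in bcmasp_set_rx_mode().
+ */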
+ umac_enable_set(intf, UMC_CMD_PROMISC, 1);
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long read, released = 0;
+ struct bcmasp_tx_cb *txcb;
+ struct bcmasp_desc *desc;
+ dma_addr_t mapping;
+
+ read = bcmasp_intf_tx_read(intf);
+ while (intf->tx_spb_dma_read != read) {
+ txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
+ mapping = dma_unmap_addr(txcb, dma_addr);
+
+ dma_unmap_single(kdev, mapping,
+ dma_unmap_len(txcb, dma_len),
+ DMA_TO_DEVICE);
+
+ if (txcb->last) {
+ dev_consume_skb_any(txcb->skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
+
+ netif_dbg(intf, tx_done, intf->ndev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ intf->tx_spb_clean_index);
+
+ bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
+ released++;
+
+ intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
+ DESC_RING_COUNT);
+ intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
+ intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ /* Ensure all descriptors have been written to DRAM for the hardware
+ * to see updated contents.
+ */
+ wmb();
+
+ napi_complete(&intf->tx_napi);
+
+ bcmasp_enable_tx_irq(intf, 1);
+
+ if (released)
+ netif_wake_queue(intf->ndev);
+
+ return 0;
+}
+
+static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, rx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long processed = 0;
+ struct bcmasp_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t valid;
+ void *data;
+ u64 flags;
+ u32 len;
+
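+ /* The VALID register points at the last byte of the last descriptor
+ * written by hardware; add one for an exclusive end pointer and wrap
+ * it back to the ring base when it passes the end of the ring.
+ */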
+ valid = bcmasp_intf_rx_desc_read(intf) + 1;
+ if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
+ valid = intf->rx_edpkt_dma_addr;
+
+ while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
+ desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
+
+ /* Ensure the descriptor has been fully written to DRAM by the
+ * hardware before the CPU reads it
+ */
+ rmb();
+
+ /* Calculate virt addr by offsetting from physical addr */
+ data = intf->rx_ring_cpu +
+ (DESC_ADDR(desc->buf) - intf->rx_ring_dma);
+
+ flags = DESC_FLAGS(desc->buf);
+ if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
+ if (net_ratelimit()) {
+ netif_err(intf, rx_status, intf->ndev,
+ "flags=0x%llx\n", flags);
+ }
+
+ u64_stats_update_begin(&stats->syncp);
+ if (flags & DESC_CRC_ERR)
+ u64_stats_inc(&stats->rx_crc_errs);
+ if (flags & DESC_RX_SYM_ERR)
+ u64_stats_inc(&stats->rx_sym_errs);
+ u64_stats_update_end(&stats->syncp);
+
+ goto next;
+ }
+
+ dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
+ DMA_FROM_DEVICE);
+
+ len = desc->size;
+
+ skb = napi_alloc_skb(napi, len);
+ if (!skb) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_dropped);
+ u64_stats_update_end(&stats->syncp);
+ intf->mib.alloc_rx_skb_failed++;
+
+ goto next;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+
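+ /* Strip the two bytes stuffed in front of the frame by the EDPKT
+ * engine (see RX_EDPKT_CFG_CFG0_EFRM_STUF).
+ */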
+ skb_pull(skb, 2);
+ len -= 2;
+ if (likely(intf->crc_fwd)) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ if ((intf->ndev->features & NETIF_F_RXCSUM) &&
+ (desc->buf & DESC_CHKSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb->protocol = eth_type_trans(skb, intf->ndev);
+
+ napi_gro_receive(napi, skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
+ u64_stats_update_end(&stats->syncp);
+
+next:
+ bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
+ desc->size));
+
+ processed++;
+ intf->rx_edpkt_dma_read =
+ incr_first_byte(intf->rx_edpkt_dma_read,
+ intf->rx_edpkt_dma_addr,
+ DESC_RING_COUNT);
+ intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
+ DESC_RING_COUNT);
+ }
+
+ bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
+
+ if (processed < budget) {
+ napi_complete_done(&intf->rx_napi, processed);
+ bcmasp_enable_rx_irq(intf, 1);
+ }
+
+ return processed;
+}
+
+static void bcmasp_adj_link(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 cmd_bits = 0, reg;
+ int changed = 0;
+
+ if (intf->old_link != phydev->link) {
+ changed = 1;
+ intf->old_link = phydev->link;
+ }
+
+ if (intf->old_duplex != phydev->duplex) {
+ changed = 1;
+ intf->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = UMC_CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = UMC_CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = UMC_CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = UMC_CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= UMC_CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= UMC_CMD_HD_EN;
+
+ if (intf->old_pause != phydev->pause) {
+ changed = 1;
+ intf->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
+
+ if (!changed)
+ return;
+
+ if (phydev->link) {
+ reg = umac_rl(intf, UMC_CMD);
+ reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
+ UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
+ UMC_CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_wl(intf, reg, UMC_CMD);
+
+ intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ }
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ if (phydev->link)
+ reg |= RGMII_LINK;
+ else
+ reg &= ~RGMII_LINK;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+
+ if (changed)
+ phy_print_status(phydev);
+}
+
+static int bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct page *buffer_pg;
+ dma_addr_t dma;
+ void *p;
+ u32 reg;
+ int ret;
+
+ intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
+ buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
+
+ dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, dma)) {
+ __free_pages(buffer_pg, intf->rx_buf_order);
+ return -ENOMEM;
+ }
+ intf->rx_ring_cpu = page_to_virt(buffer_pg);
+ intf->rx_ring_dma = dma;
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+
+ p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto free_rx_ring;
+ }
+ intf->rx_edpkt_cpu = p;
+
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+
+ intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
+ intf->rx_edpkt_index = 0;
+
+ /* Make sure channels are disabled */
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+
+ /* Rx SPB */
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_END);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_VALID);
+
+ /* EDPKT */
+ rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
+ RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_64_ALN <<
+ RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_EFRM_STUF),
+ RX_EDPKT_CFG_CFG0);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
+ RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
+ RX_EDPKT_DMA_VALID);
+
+ reg = UMAC2FB_CFG_DEFAULT_EN |
+ ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
+ reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
+ umac2fb_wl(intf, reg, UMAC2FB_CFG);
+
+ return 0;
+
+free_rx_ring:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ return ret;
+}
+
+static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+}
+
+static int bcmasp_init_tx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ void *p;
+ int ret;
+
+ p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ intf->tx_spb_cpu = p;
+ intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
+ intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
+
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
+ GFP_KERNEL);
+ if (!intf->tx_cbs) {
+ ret = -ENOMEM;
+ goto free_tx_spb;
+ }
+
+ intf->tx_spb_index = 0;
+ intf->tx_spb_clean_index = 0;
+
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
+
+ /* Make sure channels are disabled */
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+
+ /* Tx SPB */
+ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
+ TX_SPB_CTRL_XF_CTRL2);
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
+ tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
+ tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
+
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
+
+ return 0;
+
+free_tx_spb:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+
+ return ret;
+}
+
+static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* Free descriptors */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+
+ /* Free cbs */
+ kfree(intf->tx_cbs);
+}
+
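+/* Power the internal EPHY up or down: on the way up, enable its 25MHz
+ * clock, clear the IDDQ/power-down bits and pulse reset; reverse the
+ * sequence on the way down.
+ */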
+static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
+ RGMII_EPHY_CFG_IDDQ_GLOBAL;
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
+ if (enable) {
+ reg &= ~RGMII_EPHY_CK25_DIS;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~mask;
+ reg |= RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~RGMII_EPHY_RESET;
+ } else {
+ reg |= mask | RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+ reg |= RGMII_EPHY_CK25_DIS;
+ }
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ /* Set or clear the LED control override to avoid lighting up LEDs
+ * while the EPHY is powered off and drawing unnecessary current.
+ */
+ reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
+ if (enable)
+ reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ else
+ reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
+}
+
+static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_OOB_DIS;
+ if (enable)
+ reg |= RGMII_MODE_EN;
+ else
+ reg &= ~RGMII_MODE_EN;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static void bcmasp_netif_deinit(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 reg, timeout = 1000;
+
+ napi_disable(&intf->tx_napi);
+
+ bcmasp_enable_tx(intf, 0);
+
+ /* Flush any TX packets in the pipe */
+ tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
+ do {
+ reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
+ if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
+ break;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+ tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+
+ umac_enable_set(intf, UMC_CMD_TX_EN, 0);
+
+ phy_stop(dev->phydev);
+
+ umac_enable_set(intf, UMC_CMD_RX_EN, 0);
+
+ bcmasp_flush_rx_port(intf);
+ usleep_range(1000, 2000);
+ bcmasp_enable_rx(intf, 0);
+
+ napi_disable(&intf->rx_napi);
+
+ /* Disable interrupts */
+ bcmasp_enable_tx_irq(intf, 0);
+ bcmasp_enable_rx_irq(intf, 0);
+
+ netif_napi_del(&intf->tx_napi);
+ bcmasp_reclaim_free_all_tx(intf);
+
+ netif_napi_del(&intf->rx_napi);
+ bcmasp_reclaim_free_all_rx(intf);
+}
+
+static int bcmasp_stop(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
+
+ /* Stop tx from updating HW */
+ netif_tx_disable(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ phy_disconnect(dev->phydev);
+
+ /* Disable internal EPHY or external PHY */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* Disable the interface clocks */
+ bcmasp_core_clock_set_intf(intf, false);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return 0;
+}
+
+static void bcmasp_configure_port(struct bcmasp_intf *intf)
+{
+ u32 reg, id_mode_dis = 0;
+
+ reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
+ reg &= ~RGMII_PORT_MODE_MASK;
+
+ switch (intf->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = RGMII_ID_MODE_DIS;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= RGMII_PORT_MODE_EXT_GPHY;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ reg |= RGMII_PORT_MODE_EXT_EPHY;
+ break;
+ default:
+ break;
+ }
+
+ if (intf->internal_phy)
+ reg |= RGMII_PORT_MODE_EPHY;
+
+ rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_ID_MODE_DIS;
+ reg |= id_mode_dis;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ phy_interface_t phy_iface = intf->phy_interface;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
+ struct phy_device *phydev = NULL;
+ int ret;
+
+ /* Always enable interface clocks */
+ bcmasp_core_clock_set_intf(intf, true);
+
+ /* Enable internal PHY or external PHY before any MAC activity */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, true);
+ else
+ bcmasp_rgmii_mode_en_set(intf, true);
+ bcmasp_configure_port(intf);
+
+ /* This is an ugly quirk but we have not been correctly
+ * interpreting the phy_interface values and we have done that
+ * across different drivers, so at least we are consistent in
+ * our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has
+ * been strapped or programmed correctly by the boot loader so
+ * we should stick to our incorrect interpretation since we
+ * have validated it.
+ *
+ * Now when a dedicated PHY driver is in use, we need to
+ * reverse the meaning of the phy_interface_mode values to
+ * something that the PHY driver will interpret and act on such
+ * that we have two mistakes canceling themselves so to speak.
+ * We only do this for the two modes that GENET driver
+ * officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
+ * Other modes are not *officially* supported with the boot
+ * loader and the scripted environment generating Device Tree
+ * blobs for those platforms.
+ *
+ * Note that internal PHY and fixed-link configurations are not
+ * affected because they use different phy_interface_t values
+ * or the Generic PHY driver.
+ */
+ switch (phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
+
+ if (phy_connect) {
+ phydev = of_phy_connect(dev, intf->phy_dn,
+ bcmasp_adj_link, phy_flags,
+ phy_iface);
+ if (!phydev) {
+ ret = -ENODEV;
+ netdev_err(dev, "could not attach to PHY\n");
+ goto err_phy_disable;
+ }
+ } else if (!intf->wolopts) {
+ ret = phy_resume(dev->phydev);
+ if (ret)
+ goto err_phy_disable;
+ }
+
+ umac_reset(intf);
+
+ umac_init(intf);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
+
+ umac_set_hw_addr(intf, dev->dev_addr);
+
+ intf->old_duplex = -1;
+ intf->old_link = -1;
+ intf->old_pause = -1;
+
+ ret = bcmasp_init_tx(intf);
+ if (ret)
+ goto err_phy_disconnect;
+
+ /* Turn on the ASP TX path */
+ bcmasp_enable_tx(intf, 1);
+
+ ret = bcmasp_init_rx(intf);
+ if (ret)
+ goto err_reclaim_tx;
+
+ bcmasp_enable_rx(intf, 1);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
+
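+ /* Cache whether the MAC forwards the FCS so the RX path knows to
+ * trim it.
+ */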
+ intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
+
+ bcmasp_netif_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_reclaim_tx:
+ bcmasp_reclaim_free_all_tx(intf);
+err_phy_disconnect:
+ if (phydev)
+ phy_disconnect(phydev);
+err_phy_disable:
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+ return ret;
+}
+
+static int bcmasp_open(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret;
+
+ netif_dbg(intf, ifup, dev, "bcmasp open\n");
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ return ret;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret)
+ clk_disable_unprepare(intf->parent->clk);
+
+ return ret;
+}
+
+static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
+ intf->mib.tx_timeout_cnt++;
+}
+
+static int bcmasp_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ if (snprintf(name, len, "p%d", intf->port) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void bcmasp_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_intf_stats64 *lstats;
+ unsigned int start;
+
+ lstats = &intf->stats64;
+
+ do {
+ start = u64_stats_fetch_begin(&lstats->syncp);
+ stats->rx_packets = u64_stats_read(&lstats->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
+ stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
+ stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
+ stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+
+ stats->tx_packets = u64_stats_read(&lstats->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
+ } while (u64_stats_fetch_retry(&lstats->syncp, start));
+}
+
+static const struct net_device_ops bcmasp_netdev_ops = {
+ .ndo_open = bcmasp_open,
+ .ndo_stop = bcmasp_stop,
+ .ndo_start_xmit = bcmasp_xmit,
+ .ndo_tx_timeout = bcmasp_tx_timeout,
+ .ndo_set_rx_mode = bcmasp_set_rx_mode,
+ .ndo_get_phys_port_name = bcmasp_get_phys_port_name,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = bcmasp_get_stats64,
+};
+
+static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
+{
+ /* Per port */
+ intf->res.umac = priv->base + UMC_OFFSET(intf);
+ intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ (intf->port * 0x4));
+ intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
+
+ /* Per ch */
+ intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
+ intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
+ intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
+ intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
+ intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
+
+ intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
+ intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
+}
+
+#define MAX_IRQ_STR_LEN 64
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct bcmasp_intf *intf;
+ struct net_device *ndev;
+ u32 ch, port;
+ int ret;
+
+ if (of_property_read_u32(ndev_dn, "reg", &port)) {
+ dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
+ goto err;
+ }
+
+ if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
+ dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
+ goto err;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
+ if (!ndev) {
+ dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
+ goto err;
+ }
+ intf = netdev_priv(ndev);
+
+ intf->parent = priv;
+ intf->ndev = ndev;
+ intf->channel = ch;
+ intf->port = port;
+ intf->ndev_dn = ndev_dn;
+ intf->index = i;
+
+ ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
+ if (ret < 0) {
+ dev_err(dev, "invalid PHY mode property\n");
+ goto err_free_netdev;
+ }
+
+ if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ intf->internal_phy = true;
+
+ intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
+ if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
+ ret = of_phy_register_fixed_link(ndev_dn);
+ if (ret) {
+ dev_warn(dev, "%s: failed to register fixed PHY\n",
+ ndev_dn->name);
+ goto err_free_netdev;
+ }
+ intf->phy_dn = ndev_dn;
+ }
+
+ /* Map resource */
+ bcmasp_map_res(priv, intf);
+
+ if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
+ intf->phy_interface != PHY_INTERFACE_MODE_MII &&
+ intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
+ (intf->port != 1 && intf->internal_phy)) {
+ netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
+ phy_modes(intf->phy_interface), intf->port);
+ ret = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ ret = of_get_ethdev_address(ndev_dn, ndev);
+ if (ret) {
+ netdev_warn(ndev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ SET_NETDEV_DEV(ndev, dev);
+ intf->ops = &bcmasp_intf_ops;
+ ndev->netdev_ops = &bcmasp_netdev_ops;
+ ndev->ethtool_ops = &bcmasp_ethtool_ops;
+ intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ ndev->hw_features |= ndev->features;
+ ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+
+ return intf;
+
+err_free_netdev:
+ free_netdev(ndev);
+err:
+ return NULL;
+}
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf)
+{
+ if (intf->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(intf->ndev);
+ if (of_phy_is_fixed_link(intf->ndev_dn))
+ of_phy_deregister_fixed_link(intf->ndev_dn);
+ free_netdev(intf->ndev);
+}
+
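+/* Arm the UniMAC magic-packet detector (plus the SecureOn password when
+ * requested), program any wake filters, leave the receiver running and
+ * unmask the wake interrupt.
+ */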
+static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
+{
+ struct net_device *ndev = intf->ndev;
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= UMC_MPD_CTRL_MPD_EN;
+ reg &= ~UMC_MPD_CTRL_PSW_EN;
+ if (intf->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
+ umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
+ UMC_PSW_MS);
+ umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
+ UMC_PSW_LS);
+ reg |= UMC_MPD_CTRL_PSW_EN;
+ }
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->wolopts & WAKE_FILTER)
+ bcmasp_netfilt_suspend(intf);
+
+ /* UniMAC receive needs to be turned on */
+ umac_enable_set(intf, UMC_CMD_RX_EN, 1);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_CLEAR);
+ }
+
+ netif_dbg(intf, wol, ndev, "entered WOL mode\n");
+}
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct net_device *dev = intf->ndev;
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ if (!intf->wolopts) {
+ ret = phy_suspend(dev->phydev);
+ if (ret)
+ goto out;
+
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* If Wake-on-LAN is disabled, we can safely
+ * disable the network interface clocks.
+ */
+ bcmasp_core_clock_set_intf(intf, false);
+ }
+
+ if (device_may_wakeup(kdev) && intf->wolopts)
+ bcmasp_suspend_to_wol(intf);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return ret;
+
+out:
+ bcmasp_netif_init(dev, false);
+ return ret;
+}
+
+static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ reg &= ~UMC_MPD_CTRL_MPD_EN;
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_SET);
+ }
+}
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf)
+{
+ struct net_device *dev = intf->ndev;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ return ret;
+
+ ret = bcmasp_netif_init(dev, false);
+ if (ret)
+ goto out;
+
+ bcmasp_resume_from_wol(intf);
+
+ if (intf->eee.eee_enabled)
+ bcmasp_eee_enable_set(intf, true);
+
+ netif_device_attach(dev);
+
+ return 0;
+
+out:
+ clk_disable_unprepare(intf->parent->clk);
+ return ret;
+}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
new file mode 100644
index 000000000000..ad742612895f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_INTF_DEFS_H
+#define __BCMASP_INTF_DEFS_H
+
+#define UMC_OFFSET(intf) \
+ ((((intf)->port) * 0x800) + 0xc000)
+#define UMC_CMD 0x008
+#define UMC_CMD_TX_EN BIT(0)
+#define UMC_CMD_RX_EN BIT(1)
+#define UMC_CMD_SPEED_SHIFT 0x2
+#define UMC_CMD_SPEED_MASK 0x3
+#define UMC_CMD_SPEED_10 0x0
+#define UMC_CMD_SPEED_100 0x1
+#define UMC_CMD_SPEED_1000 0x2
+#define UMC_CMD_SPEED_2500 0x3
+#define UMC_CMD_PROMISC BIT(4)
+#define UMC_CMD_PAD_EN BIT(5)
+#define UMC_CMD_CRC_FWD BIT(6)
+#define UMC_CMD_PAUSE_FWD BIT(7)
+#define UMC_CMD_RX_PAUSE_IGNORE BIT(8)
+#define UMC_CMD_TX_ADDR_INS BIT(9)
+#define UMC_CMD_HD_EN BIT(10)
+#define UMC_CMD_SW_RESET BIT(13)
+#define UMC_CMD_LCL_LOOP_EN BIT(15)
+#define UMC_CMD_AUTO_CONFIG BIT(22)
+#define UMC_CMD_CNTL_FRM_EN BIT(23)
+#define UMC_CMD_NO_LEN_CHK BIT(24)
+#define UMC_CMD_RMT_LOOP_EN BIT(25)
+#define UMC_CMD_PRBL_EN BIT(27)
+#define UMC_CMD_TX_PAUSE_IGNORE BIT(28)
+#define UMC_CMD_TX_RX_EN BIT(29)
+#define UMC_CMD_RUNT_FILTER_DIS BIT(30)
+#define UMC_MAC0 0x0c
+#define UMC_MAC1 0x10
+#define UMC_FRM_LEN 0x14
+#define UMC_EEE_CTRL 0x64
+#define EN_LPI_RX_PAUSE BIT(0)
+#define EN_LPI_TX_PFC BIT(1)
+#define EN_LPI_TX_PAUSE BIT(2)
+#define EEE_EN BIT(3)
+#define RX_FIFO_CHECK BIT(4)
+#define EEE_TX_CLK_DIS BIT(5)
+#define DIS_EEE_10M BIT(6)
+#define LP_IDLE_PREDICTION_MODE BIT(7)
+#define UMC_EEE_LPI_TIMER 0x68
+#define UMC_PAUSE_CNTRL 0x330
+#define UMC_TX_FLUSH 0x334
+#define UMC_GR64 0x400
+#define UMC_GR127 0x404
+#define UMC_GR255 0x408
+#define UMC_GR511 0x40c
+#define UMC_GR1023 0x410
+#define UMC_GR1518 0x414
+#define UMC_GRMGV 0x418
+#define UMC_GR2047 0x41c
+#define UMC_GR4095 0x420
+#define UMC_GR9216 0x424
+#define UMC_GRPKT 0x428
+#define UMC_GRBYT 0x42c
+#define UMC_GRMCA 0x430
+#define UMC_GRBCA 0x434
+#define UMC_GRFCS 0x438
+#define UMC_GRXCF 0x43c
+#define UMC_GRXPF 0x440
+#define UMC_GRXUO 0x444
+#define UMC_GRALN 0x448
+#define UMC_GRFLR 0x44c
+#define UMC_GRCDE 0x450
+#define UMC_GRFCR 0x454
+#define UMC_GROVR 0x458
+#define UMC_GRJBR 0x45c
+#define UMC_GRMTUE 0x460
+#define UMC_GRPOK 0x464
+#define UMC_GRUC 0x468
+#define UMC_GRPPP 0x46c
+#define UMC_GRMCRC 0x470
+#define UMC_TR64 0x480
+#define UMC_TR127 0x484
+#define UMC_TR255 0x488
+#define UMC_TR511 0x48c
+#define UMC_TR1023 0x490
+#define UMC_TR1518 0x494
+#define UMC_TRMGV 0x498
+#define UMC_TR2047 0x49c
+#define UMC_TR4095 0x4a0
+#define UMC_TR9216 0x4a4
+#define UMC_GTPKT 0x4a8
+#define UMC_GTMCA 0x4ac
+#define UMC_GTBCA 0x4b0
+#define UMC_GTXPF 0x4b4
+#define UMC_GTXCF 0x4b8
+#define UMC_GTFCS 0x4bc
+#define UMC_GTOVR 0x4c0
+#define UMC_GTDRF 0x4c4
+#define UMC_GTEDF 0x4c8
+#define UMC_GTSCL 0x4cc
+#define UMC_GTMCL 0x4d0
+#define UMC_GTLCL 0x4d4
+#define UMC_GTXCL 0x4d8
+#define UMC_GTFRG 0x4dc
+#define UMC_GTNCL 0x4e0
+#define UMC_GTJBR 0x4e4
+#define UMC_GTBYT 0x4e8
+#define UMC_GTPOK 0x4ec
+#define UMC_GTUC 0x4f0
+#define UMC_RRPKT 0x500
+#define UMC_RRUND 0x504
+#define UMC_RRFRG 0x508
+#define UMC_RRBYT 0x50c
+#define UMC_MIB_CNTRL 0x580
+#define UMC_MIB_CNTRL_RX_CNT_RST BIT(0)
+#define UMC_MIB_CNTRL_RUNT_CNT_RST BIT(1)
+#define UMC_MIB_CNTRL_TX_CNT_RST BIT(2)
+#define UMC_RX_MAX_PKT_SZ 0x608
+#define UMC_MPD_CTRL 0x620
+#define UMC_MPD_CTRL_MPD_EN BIT(0)
+#define UMC_MPD_CTRL_PSW_EN BIT(27)
+#define UMC_PSW_MS 0x624
+#define UMC_PSW_LS 0x628
+
+#define UMAC2FB_OFFSET_2_1 0x9f044
+#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_CFG 0x0
+#define UMAC2FB_CFG_OPUT_EN BIT(0)
+#define UMAC2FB_CFG_VLAN_EN BIT(1)
+#define UMAC2FB_CFG_SNAP_EN BIT(2)
+#define UMAC2FB_CFG_BCM_TG_EN BIT(3)
+#define UMAC2FB_CFG_IPUT_EN BIT(4)
+#define UMAC2FB_CFG_CHID_SHIFT 8
+#define UMAC2FB_CFG_OK_SEND_SHIFT 24
+#define UMAC2FB_CFG_DEFAULT_EN \
+ (UMAC2FB_CFG_OPUT_EN | UMAC2FB_CFG_VLAN_EN \
+ | UMAC2FB_CFG_SNAP_EN | UMAC2FB_CFG_IPUT_EN)
+
+#define RGMII_OFFSET(intf) \
+ ((((intf)->port) * 0x100) + 0xd000)
+#define RGMII_EPHY_CNTRL 0x00
+#define RGMII_EPHY_CFG_IDDQ_BIAS BIT(0)
+#define RGMII_EPHY_CFG_EXT_PWRDOWN BIT(1)
+#define RGMII_EPHY_CFG_FORCE_DLL_EN BIT(2)
+#define RGMII_EPHY_CFG_IDDQ_GLOBAL BIT(3)
+#define RGMII_EPHY_CK25_DIS BIT(4)
+#define RGMII_EPHY_RESET BIT(7)
+#define RGMII_OOB_CNTRL 0x0c
+#define RGMII_LINK BIT(4)
+#define RGMII_OOB_DIS BIT(5)
+#define RGMII_MODE_EN BIT(6)
+#define RGMII_ID_MODE_DIS BIT(16)
+
+#define RGMII_PORT_CNTRL 0x60
+#define RGMII_PORT_MODE_EPHY 0
+#define RGMII_PORT_MODE_GPHY 1
+#define RGMII_PORT_MODE_EXT_EPHY 2
+#define RGMII_PORT_MODE_EXT_GPHY 3
+#define RGMII_PORT_MODE_EXT_RVMII 4
+#define RGMII_PORT_MODE_MASK GENMASK(2, 0)
+
+#define RGMII_SYS_LED_CNTRL 0x74
+#define RGMII_SYS_LED_CNTRL_LINK_OVRD BIT(15)
+
+#define TX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0x48180)
+#define TX_SPB_DMA_READ 0x00
+#define TX_SPB_DMA_BASE 0x08
+#define TX_SPB_DMA_END 0x10
+#define TX_SPB_DMA_VALID 0x18
+#define TX_SPB_DMA_FIFO_CTRL 0x20
+#define TX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define TX_SPB_DMA_FIFO_STATUS 0x24
+
+#define TX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel) * 0x68) + 0x49340)
+#define TX_SPB_CTRL_ENABLE 0x0
+#define TX_SPB_CTRL_ENABLE_EN BIT(0)
+#define TX_SPB_CTRL_XF_CTRL2 0x20
+#define TX_SPB_CTRL_XF_BID_SHIFT 16
+
+#define TX_SPB_TOP_OFFSET(intf) \
+ ((((intf)->channel) * 0x1c) + 0x4a0e0)
+#define TX_SPB_TOP_BLKOUT 0x0
+#define TX_SPB_TOP_SPRE_BW_CTRL 0x4
+
+#define TX_EPKT_C_OFFSET(intf) \
+ ((((intf)->channel) * 0x120) + 0x40900)
+#define TX_EPKT_C_CFG_MISC 0x0
+#define TX_EPKT_C_CFG_MISC_EN BIT(0)
+#define TX_EPKT_C_CFG_MISC_PT BIT(1)
+#define TX_EPKT_C_CFG_MISC_PS_SHIFT 14
+#define TX_EPKT_C_CFG_MISC_FD_SHIFT 20
+
+#define TX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel * 0xc) + 0x49a20))
+#define TX_PAUSE_MAP_VECTOR 0x8
+
+#define RX_EDPKT_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x38) + 0x9ca00)
+#define RX_EDPKT_DMA_WRITE 0x00
+#define RX_EDPKT_DMA_READ 0x08
+#define RX_EDPKT_DMA_BASE 0x10
+#define RX_EDPKT_DMA_END 0x18
+#define RX_EDPKT_DMA_VALID 0x20
+#define RX_EDPKT_DMA_FULLNESS 0x28
+#define RX_EDPKT_DMA_MIN_THRES 0x2c
+#define RX_EDPKT_DMA_CH_XONOFF 0x30
+
+#define RX_EDPKT_CFG_OFFSET(intf) \
+ ((((intf)->channel) * 0x70) + 0x9c600)
+#define RX_EDPKT_CFG_CFG0 0x0
+#define RX_EDPKT_CFG_CFG0_DBUF_SHIFT 9
+#define RX_EDPKT_CFG_CFG0_RBUF 0x0
+#define RX_EDPKT_CFG_CFG0_RBUF_4K 0x1
+#define RX_EDPKT_CFG_CFG0_BUF_4K 0x2
+/* EFRM stuffing: 0 = no byte stuffing, 1 = two-byte stuffing */
+#define RX_EDPKT_CFG_CFG0_EFRM_STUF BIT(11)
+#define RX_EDPKT_CFG_CFG0_BALN_SHIFT 12
+#define RX_EDPKT_CFG_CFG0_NO_ALN 0
+#define RX_EDPKT_CFG_CFG0_4_ALN 2
+#define RX_EDPKT_CFG_CFG0_64_ALN 6
+#define RX_EDPKT_RING_BUFFER_WRITE 0x38
+#define RX_EDPKT_RING_BUFFER_READ 0x40
+#define RX_EDPKT_RING_BUFFER_BASE 0x48
+#define RX_EDPKT_RING_BUFFER_END 0x50
+#define RX_EDPKT_RING_BUFFER_VALID 0x58
+#define RX_EDPKT_CFG_ENABLE 0x6c
+#define RX_EDPKT_CFG_ENABLE_EN BIT(0)
+
+#define RX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0xa0000)
+#define RX_SPB_DMA_READ 0x00
+#define RX_SPB_DMA_BASE 0x08
+#define RX_SPB_DMA_END 0x10
+#define RX_SPB_DMA_VALID 0x18
+#define RX_SPB_DMA_FIFO_CTRL 0x20
+#define RX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define RX_SPB_DMA_FIFO_STATUS 0x24
+
+#define RX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x68) + 0xa1000)
+#define RX_SPB_CTRL_ENABLE 0x00
+#define RX_SPB_CTRL_ENABLE_EN BIT(0)
+
+#define RX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x4) + 0xa1138)
+#define RX_PAUSE_MAP_VECTOR 0x00
+
+#define RX_SPB_TOP_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x14) + 0xa2000)
+#define RX_SPB_TOP_BLKOUT 0x00
+
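+/* RX packet buffer: 32 buffers of 4K each (128K total with 4K pages) */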
+#define NUM_4K_BUFFERS 32
+#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS)
+
+#define DESC_RING_COUNT (64 * NUM_4K_BUFFERS)
+#define DESC_SIZE 16
+#define DESC_RING_SIZE (DESC_RING_COUNT * DESC_SIZE)
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e5b54e6025be..a3bbd13c070f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -293,6 +293,60 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
BNXT_DB_CQ(db, idx);
}
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+static void __bnxt_queue_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
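+/* Record a slow-path event and kick the sp_task worker */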
+static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
+{
+ set_bit(event, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+}
+
+static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (!rxr->bnapi->in_reset) {
+ rxr->bnapi->in_reset = true;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+ }
+ rxr->rx_next_cons = 0xffff;
+}
+
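+/* Called on an inconsistent TX completion. Mark the ring faulted so
+ * __bnxt_poll_work_done() skips further TX processing, then schedule
+ * a reset to recover.
+ */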
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int idx)
+{
+ struct bnxt_napi *bnapi = txr->bnapi;
+
+ if (bnapi->tx_fault)
+ return;
+
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
+ txr->txq_index, bnapi->tx_pkts,
+ txr->tx_cons, txr->tx_prod, idx);
+ WARN_ON_ONCE(1);
+ bnapi->tx_fault = 1;
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
+}
+
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -652,6 +706,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ if (unlikely(!skb)) {
+ bnxt_sched_reset_txr(bp, txr, i);
+ return;
+ }
+
tx_bytes += skb->len;
if (tx_buf->is_push) {
@@ -685,7 +744,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
WRITE_ONCE(txr->tx_cons, cons);
@@ -1234,38 +1293,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
-static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
-{
- if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
- return;
-
- if (BNXT_PF(bp))
- queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
- else
- schedule_delayed_work(&bp->fw_reset_task, delay);
-}
-
-static void bnxt_queue_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp))
- queue_work(bnxt_pf_wq, &bp->sp_task);
- else
- schedule_work(&bp->sp_task);
-}
-
-static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- if (!rxr->bnapi->in_reset) {
- rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- else
- set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- rxr->rx_next_cons = 0xffff;
-}
-
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
@@ -1320,7 +1347,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
cons, rxr->rx_next_cons,
TPA_START_ERROR_CODE(tpa_start1));
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
return;
}
/* Store cfa_code in tpa_info to use in tpa_end
@@ -1844,7 +1871,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (rxr->rx_next_cons != 0xffff)
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
if (rc1)
return rc1;
goto next_rx_no_prod_no_len;
@@ -1882,7 +1909,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
}
}
goto next_rx_no_len;
@@ -2329,7 +2356,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
}
rxr = bp->bnapi[grp_idx]->rx_ring;
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
@@ -2384,7 +2411,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
default:
goto async_event_process_exit;
}
- bnxt_queue_sp_work(bp);
+ __bnxt_queue_sp_work(bp);
async_event_process_exit:
return 0;
}
@@ -2413,8 +2440,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
}
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
- set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -2571,7 +2597,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
- if (bnapi->tx_pkts) {
+ if (bnapi->tx_pkts && !bnapi->tx_fault) {
bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
bnapi->tx_pkts = 0;
}
@@ -9424,6 +9450,8 @@ static void bnxt_enable_napi(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
+ bnapi->tx_fault = 0;
+
cpr = &bnapi->cp_ring;
if (bnapi->in_reset)
cpr->sw_stats.rx.rx_resets++;
@@ -11031,8 +11059,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (mask != vnic->rx_mask || uc_update || mc_update) {
vnic->rx_mask = mask;
- set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
}
}
@@ -11597,8 +11624,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
struct bnxt *bp = netdev_priv(dev);
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}
static void bnxt_fw_health_check(struct bnxt *bp)
@@ -11635,8 +11661,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
return;
fw_reset:
- set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
}
static void bnxt_timer(struct timer_list *t)
@@ -11653,21 +11678,15 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
- set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
+ bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
- if (bnxt_tc_flower_enabled(bp)) {
- set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (bnxt_tc_flower_enabled(bp))
+ bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
#endif /*CONFIG_RFS_ACCEL*/
if (bp->link_info.phy_retry) {
@@ -11675,21 +11694,17 @@ static void bnxt_timer(struct timer_list *t)
bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
- set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
}
}
- if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
- set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev)) {
- set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ netif_carrier_ok(dev))
+ bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
+
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -12968,8 +12983,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
return new_fltr->sw_id;
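
For reference between hunks: the converted call sites above imply that the old two-step pattern (set_bit() on bp->sp_event followed by a queue call) was folded into a helper pair. A minimal sketch of that pair, assuming __bnxt_queue_sp_work() keeps the driver's existing PF/VF workqueue split; the exact bodies are not shown in this diff:

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	/* Assumed from prior bnxt behavior: PFs use the dedicated
	 * bnxt_pf_wq workqueue, VFs the system workqueue.
	 */
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}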
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 080e73496066..9d16757e27fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1008,6 +1008,7 @@ struct bnxt_napi {
int);
int tx_pkts;
u8 events;
+ u8 tx_fault:1;
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -2329,6 +2330,8 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int idx);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4efa5fe6972b..5b6fbdc4dc40 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -149,6 +149,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf->action = 0;
tx_buf->xdpf = NULL;
} else if (tx_buf->action == XDP_TX) {
+ tx_buf->action = 0;
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
@@ -158,6 +159,9 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf = &txr->tx_buf_ring[tx_cons];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
+ } else {
+ bnxt_sched_reset_txr(bp, txr, i);
+ return;
}
tx_cons = NEXT_TX(tx_cons);
}
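
bnxt_sched_reset_txr(), declared in bnxt.h above and called from this XDP completion path when a tx_buf carries an unexpected action, is not shown in this excerpt. A plausible sketch, assuming it mirrors bnxt_sched_reset_rxr() and uses the new tx_fault bit (which, per the __bnxt_poll_work_done() hunk earlier, stops further TX completion handling until the reset task runs):

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int idx)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	/* Mark the ring faulted and let the sp_task reset path recover it. */
	netdev_err(bp->dev, "Invalid Tx completion (ring:%d cons:%u)\n",
		   idx, txr->tx_cons);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}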
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index d6d90f9722a7..31191b520b58 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1037,8 +1037,7 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info =
- (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1056,7 +1055,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1133,7 +1132,7 @@ bnad_tx_cleanup(struct delayed_work *work)
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
int i;
@@ -1149,7 +1148,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1208,7 +1207,7 @@ bnad_rx_cleanup(void *work)
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1231,7 +1230,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bna_rcb *rcb;
struct bnad_rx_ctrl *rx_ctrl;
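
The bnad hunks here (and the hns3/hns_mdio ones later in this diff) all remove the same redundancy: an explicit cast when assigning from a void * member, which C performs implicitly. Illustrative C only, with a hypothetical lookup_priv() standing in for the drivers' real sources:

static void cast_example(void)
{
	void *priv = lookup_priv();			/* hypothetical */
	struct bnad_tx_info *a = priv;			/* new style: implicit */
	struct bnad_tx_info *b = (struct bnad_tx_info *)priv; /* removed style */
}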
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 84751bb303a6..079f9f6ae21a 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1333,7 +1333,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
skb = tsnep_build_skb(rx, page, length);
if (skb) {
- page_pool_release_page(rx->page_pool, page);
+ skb_mark_for_recycle(skb);
rx->packets++;
rx->bytes += length;
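
The tsnep change replaces page_pool_release_page(), which detaches the page from its pool, with skb_mark_for_recycle(), which tags the skb so that freed fragments flow back to the originating page_pool. A generic sketch of the pattern, not tsnep-specific code:

/* Generic page_pool RX recycling pattern (sketch). */
struct page *page = page_pool_dev_alloc_pages(pool);
/* ... hardware DMA fills the page, driver builds an skb around it ... */
struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

skb_mark_for_recycle(skb);	/* freeing the skb returns the page to 'pool' */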
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a03879a27b04..9135b918dd49 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -177,16 +177,20 @@ static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
-static void ftgmac100_initial_mac(struct ftgmac100 *priv)
+static int ftgmac100_initial_mac(struct ftgmac100 *priv)
{
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
+ int err;
- if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
+ err = of_get_ethdev_address(priv->dev->of_node, priv->netdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (!err) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
priv->netdev->dev_addr);
- return;
+ return 0;
}
m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
@@ -207,6 +211,8 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
dev_info(priv->dev, "Generated random MAC address %pM\n",
priv->netdev->dev_addr);
}
+
+ return 0;
}
static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
@@ -1843,7 +1849,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->aneg_pause = true;
/* MAC address from chip or random one */
- ftgmac100_initial_mac(priv);
+ err = ftgmac100_initial_mac(priv);
+ if (err)
+ goto err_phy_connect;
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 431f8917dc39..85e38a1ec22a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3497,7 +3497,7 @@ free_netdev:
return err;
}
-static int dpaa_remove(struct platform_device *pdev)
+static void dpaa_remove(struct platform_device *pdev)
{
struct net_device *net_dev;
struct dpaa_priv *priv;
@@ -3516,6 +3516,9 @@ static int dpaa_remove(struct platform_device *pdev)
phylink_destroy(priv->mac_dev->phylink);
err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+ if (err)
+ dev_err(dev, "Failed to free FQs on remove (%pe)\n",

+ ERR_PTR(err));
qman_delete_cgr_safe(&priv->ingress_cgr);
qman_release_cgrid(priv->ingress_cgr.cgrid);
@@ -3527,8 +3530,6 @@ static int dpaa_remove(struct platform_device *pdev)
dpaa_bps_free(priv);
free_netdev(net_dev);
-
- return err;
}
static const struct platform_device_id dpaa_devtype[] = {
@@ -3546,7 +3547,7 @@ static struct platform_driver dpaa_driver = {
},
.id_table = dpaa_devtype,
.probe = dpaa_eth_probe,
- .remove = dpaa_remove
+ .remove_new = dpaa_remove
};
static int __init dpaa_load(void)
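
dpaa is the first of several drivers in this diff (fec, fec_mpc52xx and its MDIO bus, the fs_enet MDIO buses, fsl_pq_mdio, gianfar, ucc_geth) converted from the int-returning .remove callback to the void-returning .remove_new. A generic sketch of the conversion, using a hypothetical "foo" driver:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/* New style: remove returns void, making the "cannot fail" contract
 * explicit. Errors must be logged locally (as the dev_err() added to
 * dpaa_remove() above does) rather than returned to the core, which
 * ignored them anyway.
 */
static void foo_remove(struct platform_device *pdev)
{
}

static struct platform_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove_new	= foo_remove,	/* was: .remove = foo_remove (int) */
};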
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 63a053dea819..8f1edcca96c4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -651,12 +651,9 @@ struct fec_enet_private {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
- unsigned long last_overflow_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
- u32 base_incval;
u32 cycle_speed;
int hwts_rx_en;
int hwts_tx_en;
@@ -679,8 +676,6 @@ struct fec_enet_private {
struct ethtool_eee eee;
unsigned int clk_ref_rate;
- u32 rx_copybreak;
-
/* ptp clock period in ns*/
unsigned int ptp_inc;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66b5cbdb43b9..14d0dc7ba3c9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -325,8 +325,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
-#define COPYBREAK_DEFAULT 256
-
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
@@ -3059,44 +3057,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-static int fec_enet_get_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = fep->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int fec_enet_set_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- fep->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
/* LPI Sleep Ts count base on tx clk (clk_ref).
* The lpi sleep cnt value = X us / (cycle_ns).
*/
@@ -3234,8 +3194,6 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_sset_count = fec_enet_get_sset_count,
#endif
.get_ts_info = fec_enet_get_ts_info,
- .get_tunable = fec_enet_get_tunable,
- .set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
.get_eee = fec_enet_get_eee,
@@ -4018,9 +3976,6 @@ static int fec_enet_init(struct net_device *ndev)
if (ret)
goto free_queue_mem;
- /* make sure MAC we just acquired is programmed into the hw */
- fec_set_mac_address(ndev, NULL);
-
/* Set receive and transmit descriptor base. */
for (i = 0; i < fep->num_rx_queues; i++) {
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
@@ -4486,7 +4441,6 @@ fec_probe(struct platform_device *pdev)
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
- fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -4526,7 +4480,7 @@ failed_ioremap:
return ret;
}
-static int
+static void
fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -4562,7 +4516,6 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
- return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
@@ -4718,7 +4671,7 @@ static struct platform_driver fec_driver = {
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove = fec_drv_remove,
+ .remove_new = fec_drv_remove,
};
module_platform_driver(fec_driver);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b88816b71ddf..984ece5cd64f 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -974,7 +974,7 @@ err_netdev:
return rv;
}
-static int
+static void
mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
@@ -998,8 +998,6 @@ mpc52xx_fec_remove(struct platform_device *op)
release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1042,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = {
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
- .remove = mpc52xx_fec_remove,
+ .remove_new = mpc52xx_fec_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_fec_of_suspend,
.resume = mpc52xx_fec_of_resume,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 95f778cce98c..61d3776d6750 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -117,7 +117,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
return err;
}
-static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+static void mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
@@ -126,8 +126,6 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id mpc52xx_fec_mdio_match[] = {
@@ -145,7 +143,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = {
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
- .remove = mpc52xx_fec_mdio_remove,
+ .remove_new = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index ab86bb8562ef..afc658d2c271 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -443,21 +443,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct fec_enet_private *adapter =
+ struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
- mutex_lock(&adapter->ptp_clk_mutex);
+ mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
- if (!adapter->ptp_clk_on) {
- mutex_unlock(&adapter->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
return -EINVAL;
}
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->tc);
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- mutex_unlock(&adapter->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 43665806c590..c5045891d694 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -331,12 +331,11 @@ _return_of_node_put:
return err;
}
-static int mac_remove(struct platform_device *pdev)
+static void mac_remove(struct platform_device *pdev)
{
struct mac_device *mac_dev = platform_get_drvdata(pdev);
platform_device_unregister(mac_dev->priv->eth_dev);
- return 0;
}
static struct platform_driver mac_driver = {
@@ -345,7 +344,7 @@ static struct platform_driver mac_driver = {
.of_match_table = mac_match,
},
.probe = mac_probe,
- .remove = mac_remove,
+ .remove_new = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8844a9a04fcf..f9f5b28cc72e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1051,7 +1051,7 @@ out_free_fpi:
return ret;
}
-static int fs_enet_remove(struct platform_device *ofdev)
+static void fs_enet_remove(struct platform_device *ofdev)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct fs_enet_private *fep = netdev_priv(ndev);
@@ -1066,7 +1066,6 @@ static int fs_enet_remove(struct platform_device *ofdev)
if (of_phy_is_fixed_link(ofdev->dev.of_node))
of_phy_deregister_fixed_link(ofdev->dev.of_node);
free_netdev(ndev);
- return 0;
}
static const struct of_device_id fs_enet_match[] = {
@@ -1113,7 +1112,7 @@ static struct platform_driver fs_enet_driver = {
.of_match_table = fs_enet_match,
},
.probe = fs_enet_probe,
- .remove = fs_enet_remove,
+ .remove_new = fs_enet_remove,
};
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 21de56345503..91a69fc2f7c2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -192,7 +192,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct platform_device *ofdev)
+static void fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct bb_info *bitbang = bus->priv;
@@ -201,8 +201,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
free_mdio_bitbang(bus);
iounmap(bitbang->dir);
kfree(bitbang);
-
- return 0;
}
static const struct of_device_id fs_enet_mdio_bb_match[] = {
@@ -219,7 +217,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
- .remove = fs_enet_mdio_remove,
+ .remove_new = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 59a8f0bd0f5c..1910df250c33 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -187,7 +187,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct platform_device *ofdev)
+static void fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct fec_info *fec = bus->priv;
@@ -196,8 +196,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
iounmap(fec->fecp);
kfree(fec);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id fs_enet_mdio_fec_match[] = {
@@ -220,7 +218,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
- .remove = fs_enet_mdio_remove,
+ .remove_new = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9d58d8334467..01d594886f52 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -511,7 +511,7 @@ error:
}
-static int fsl_pq_mdio_remove(struct platform_device *pdev)
+static void fsl_pq_mdio_remove(struct platform_device *pdev)
{
struct device *device = &pdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
@@ -521,8 +521,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
iounmap(priv->map);
mdiobus_free(bus);
-
- return 0;
}
static struct platform_driver fsl_pq_mdio_driver = {
@@ -531,7 +529,7 @@ static struct platform_driver fsl_pq_mdio_driver = {
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
- .remove = fsl_pq_mdio_remove,
+ .remove_new = fsl_pq_mdio_remove,
};
module_platform_driver(fsl_pq_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 38d5013c6fed..0c898f9ee358 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3364,7 +3364,7 @@ register_fail:
return err;
}
-static int gfar_remove(struct platform_device *ofdev)
+static void gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
@@ -3381,8 +3381,6 @@ static int gfar_remove(struct platform_device *ofdev)
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
free_gfar_dev(priv);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -3642,7 +3640,7 @@ static struct platform_driver gfar_driver = {
.of_match_table = gfar_match,
},
.probe = gfar_probe,
- .remove = gfar_remove,
+ .remove_new = gfar_remove,
};
module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 7a4cb4f07c32..2b3a15f24e7c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3753,7 +3753,7 @@ err_free_info:
return err;
}
-static int ucc_geth_remove(struct platform_device* ofdev)
+static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3767,8 +3767,6 @@ static int ucc_geth_remove(struct platform_device* ofdev)
of_node_put(ugeth->ug_info->phy_node);
kfree(ugeth->ug_info);
free_netdev(dev);
-
- return 0;
}
static const struct of_device_id ucc_geth_match[] = {
@@ -3787,7 +3785,7 @@ static struct platform_driver ucc_geth_driver = {
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
- .remove = ucc_geth_remove,
+ .remove_new = ucc_geth_remove,
.suspend = ucc_geth_suspend,
.resume = ucc_geth_resume,
};
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index f4ae9e19b844..c2874cdcf40c 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -105,10 +105,10 @@ union gve_rx_data_slot {
__be64 addr;
};
-/* GVE Recive Packet Descriptor Seq No */
+/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)
-/* GVE Recive Packet Descriptor Flags */
+/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 407d30ee55d2..36858a72d771 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -569,8 +569,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
{
- struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
int i, j;
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 9232caaf0bdc..409a89d80220 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -217,7 +217,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
static int hns_mdio_write_c22(struct mii_bus *bus,
int phy_id, int regnum, u16 data)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 cmd_reg_cfg;
int ret;
@@ -259,7 +259,7 @@ static int hns_mdio_write_c22(struct mii_bus *bus,
static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
int regnum, u16 data)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 cmd_reg_cfg;
int ret;
@@ -312,7 +312,7 @@ static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
*/
static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 reg_val;
int ret;
@@ -363,7 +363,7 @@ static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
int regnum)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 reg_val;
int ret;
@@ -424,7 +424,7 @@ static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
*/
static int hns_mdio_reset(struct mii_bus *bus)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
const struct hns_mdio_sc_reg *sc_reg;
int ret;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 29ad1797adce..982ae70c51e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3585,11 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->xsk_pool) {
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- chain_len = 1;
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@@ -13822,6 +13817,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 8b8bf4880faa..0b3a27f118fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2284,8 +2284,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
-static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 8c3d24012c54..900b0d9ede9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -473,6 +473,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index be59ba3774e1..398fb4854cbe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4305,6 +4305,38 @@ err_out:
}
/**
+ * i40e_check_vf_init_timeout
+ * @vf: the virtual function
+ *
+ * Check that the VF's initialization completed; if not, wait up to
+ * 300 ms for it to finish.
+ *
+ * Returns true when the VF is initialized, false on timeout
+ **/
+static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
+{
+ int i;
+
+ /* When the VF is resetting, wait until it is done.
+ * It can take up to 200 milliseconds, but wait for
+ * up to 300 milliseconds to be safe.
+ */
+ for (i = 0; i < 15; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ return true;
+ msleep(20);
+ }
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&vf->pf->pdev->dev,
+ "VF %d still in reset. Try again.\n", vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -4322,7 +4354,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ret = 0;
struct hlist_node *h;
int bkt;
- u8 i;
if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4335,21 +4366,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
-
- /* When the VF is resetting wait until it is done.
- * It can take up to 200 milliseconds,
- * but wait for up to 300 milliseconds to be safe.
- * Acquire the VSI pointer only after the VF has been
- * properly initialized.
- */
- for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
- break;
- msleep(20);
- }
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error_param;
}
@@ -4451,13 +4468,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error_pvid;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (le16_to_cpu(vsi->info.pvid) == vlanprio)
/* duplicate request, so just return success */
@@ -4601,13 +4616,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
@@ -4774,9 +4787,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
}
vf = &(pf->vf[vf_id]);
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto out;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 05ec1181471e..37f41c8a682f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -294,8 +294,14 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
{
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo = NULL;
struct sk_buff *skb;
+ u32 nr_frags = 0;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
@@ -312,6 +318,28 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
__skb_pull(skb, metasize);
}
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (int i = 0; i < nr_frags; i++) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct page *page;
+ void *addr;
+
+ page = dev_alloc_page();
+ if (!page) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ addr = page_to_virt(page);
+
+ memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));
+
+ __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+ page, 0, skb_frag_size(frag));
+ }
+
out:
xsk_buff_free(xdp);
return skb;
@@ -322,14 +350,13 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
unsigned int *rx_packets,
unsigned int *rx_bytes,
- unsigned int size,
unsigned int xdp_res,
bool *failure)
{
struct sk_buff *skb;
*rx_packets = 1;
- *rx_bytes = size;
+ *rx_bytes = xdp_get_buff_len(xdp_buff);
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
@@ -363,7 +390,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
return;
}
- *rx_bytes = skb->len;
i40e_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
return;
@@ -374,6 +400,31 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
+static int
+i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
+ struct xdp_buff *xdp, const unsigned int size)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+ if (!xdp_buff_has_frags(first)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(first);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ xsk_buff_free(first);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+ virt_to_page(xdp->data_hard_start), 0, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+ return 0;
+}
+
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -384,13 +435,18 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct xdp_buff *first = NULL;
struct bpf_prog *xdp_prog;
bool failure = false;
u16 cleaned_count;
+ if (next_to_process != next_to_clean)
+ first = *i40e_rx_bi(rx_ring, next_to_clean);
+
/* NB! xdp_prog will always be !NULL, due to the fact that
* this path is enabled by setting an XDP program.
*/
@@ -404,7 +460,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -417,9 +473,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
- next_to_clean = (next_to_clean + 1) & count_mask;
+ next_to_process = (next_to_process + 1) & count_mask;
continue;
}
@@ -428,22 +484,35 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
- i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res, &failure);
+ if (!first)
+ first = bi;
+ else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ break;
+
+ next_to_process = (next_to_process + 1) & count_mask;
+
+ if (i40e_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+ i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+ &rx_bytes, xdp_res, &failure);
+ first->flags = 0;
+ next_to_clean = next_to_process;
if (failure)
break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
- next_to_clean = (next_to_clean + 1) & count_mask;
+ first = NULL;
}
rx_ring->next_to_clean = next_to_clean;
+ rx_ring->next_to_process = next_to_process;
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
@@ -466,6 +535,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
unsigned int *total_bytes)
{
+ u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
struct i40e_tx_desc *tx_desc;
dma_addr_t dma;
@@ -474,8 +544,7 @@ static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
- 0, desc->len, 0);
+ tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);
*total_bytes += desc->len;
}
@@ -489,14 +558,14 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
u32 i;
loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
+
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
- I40E_TX_DESC_CMD_EOP,
- 0, desc[i].len, 0);
+ tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);
*total_bytes += desc[i].len;
}
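
The cmd expressions above rely on I40E_TX_DESC_CMD_EOP being bit 0 of the command field, so OR-ing in the boolean result of xsk_is_eop_desc() sets exactly the EOP bit on the last descriptor of a multi-buffer frame. The helper, from the AF_XDP multi-buffer support in include/net/xdp_sock_drv.h, reduces to a test of the XDP_PKT_CONTD option; roughly (the exact form in the tree may differ):

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	/* A descriptor without the "continued" flag ends the packet. */
	return !(desc->options & XDP_PKT_CONTD);
}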
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 817977e3039d..960277d78e09 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -47,5 +47,5 @@ ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
-ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4ba3d99439a0..9109006336f0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -370,6 +370,7 @@ struct ice_vsi {
u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
+ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -517,6 +518,7 @@ enum ice_misc_thread_tasks {
struct ice_switchdev_info {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
+ struct ice_esw_br_offloads *br_offloads;
bool is_running;
};
@@ -626,6 +628,7 @@ struct ice_pf {
struct ice_lag *lag; /* Link Aggregation information */
struct ice_switchdev_info switchdev;
+ struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
@@ -853,7 +856,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-bool netif_is_ice(struct net_device *dev);
+bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index b678bdf96f3a..9ab9fb558b5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -408,7 +408,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
*/
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
- int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
@@ -472,17 +471,11 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
rlan_ctx.showiv = 0;
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- if (ring->xsk_pool)
- chain_len = 1;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
- chain_len * ring->rx_buf_len);
+ ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index ad0a007b7398..9a53a5e5d73e 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
+#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
@@ -103,17 +104,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
rule_added = true;
}
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+ if (vlan_ops->dis_rx_filtering(uplink_vsi))
+ goto err_dis_rx;
+
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
+ if (ice_vsi_update_local_lb(uplink_vsi, true))
+ goto err_override_local_lb;
+
return 0;
+err_override_local_lb:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
+ vlan_ops->ena_rx_filtering(uplink_vsi);
+err_dis_rx:
if (rule_added)
ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
@@ -306,6 +318,9 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
+ if (repr->br_port)
+ repr->br_port->vsi = vsi;
+
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
@@ -331,6 +346,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
+ if (!vsi || !ice_is_switchdev_running(vsi->back))
+ return NETDEV_TX_BUSY;
+
if (ice_is_reset_in_progress(vsi->back->state) ||
test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
@@ -378,9 +396,14 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+ ice_vsi_update_local_lb(uplink_vsi, false);
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ vlan_ops->ena_rx_filtering(uplink_vsi);
ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -455,16 +478,24 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi;
+ struct ice_vsi *ctrl_vsi, *uplink_vsi;
+
+ uplink_vsi = ice_get_main_vsi(pf);
+ if (!uplink_vsi)
+ return -ENODEV;
+
+ if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Uplink port cannot be a bridge port\n");
+ return -EINVAL;
+ }
pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
if (!pf->switchdev.control_vsi)
return -ENODEV;
ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
- if (!pf->switchdev.uplink_vsi)
- goto err_vsi;
+ pf->switchdev.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
goto err_vsi;
@@ -480,10 +511,15 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
if (ice_vsi_open(ctrl_vsi))
goto err_setup_reprs;
+ if (ice_eswitch_br_offloads_init(pf))
+ goto err_br_offloads;
+
ice_eswitch_napi_enable(pf);
return 0;
+err_br_offloads:
+ ice_vsi_close(ctrl_vsi);
err_setup_reprs:
ice_repr_rem_from_all_vfs(pf);
err_repr_add:
@@ -502,8 +538,8 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
ice_eswitch_napi_disable(pf);
+ ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
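
The new ice_eswitch_br.c added below keeps offloaded FDB entries in an rhashtable keyed by the {MAC, VID} pair of ice_esw_br_fdb_data. For orientation, a sketch of the rhashtable lifecycle the file builds on:

struct rhashtable fdb_ht;

err = rhashtable_init(&fdb_ht, &ice_fdb_ht_params);	/* bridge init */
err = rhashtable_insert_fast(&fdb_ht, &entry->ht_node,
			     ice_fdb_ht_params);	/* FDB learn */
entry = rhashtable_lookup_fast(&fdb_ht, &key,
			       ice_fdb_ht_params);	/* FDB find */
err = rhashtable_remove_fast(&fdb_ht, &entry->ht_node,
			     ice_fdb_ht_params);	/* FDB delete */
rhashtable_destroy(&fdb_ht);				/* bridge teardown */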
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
new file mode 100644
index 000000000000..cc7357ed6e5f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch_br.h"
+#include "ice_repr.h"
+#include "ice_switch.h"
+#include "ice_vlan.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_trace.h"
+
+#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
+
+static const struct rhashtable_params ice_fdb_ht_params = {
+ .key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
+ .key_len = sizeof(struct ice_esw_br_fdb_data),
+ .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
+{
+ /* Accept only PF netdev and PRs */
+ return ice_is_port_repr_netdev(dev) || netif_is_ice(dev);
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_netdev_to_port(struct net_device *dev)
+{
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ return repr->br_port;
+ } else if (netif_is_ice(dev)) {
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ return pf->br_port;
+ }
+
+ return NULL;
+}
+
+static void
+ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u8 pf_id, u16 vf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = vf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_RX;
+ rule_info->sw_act.src = pf_id;
+ rule_info->priority = 5;
+}
+
+static void
+ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u16 pf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = pf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_TX;
+ rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info->flags_info.act_valid = true;
+ rule_info->priority = 5;
+}
+
+static int
+ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
+{
+ int err;
+
+ if (!rule)
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(hw, rule);
+ kfree(rule);
+
+ return err;
+}
+
+static u16
+ice_eswitch_br_get_lkups_cnt(u16 vid)
+{
+ return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
+}
+
+static void
+ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
+{
+ if (ice_eswitch_br_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ u16 lkups_cnt;
+ int err;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list) {
+ err = -ENOMEM;
+ goto err_list_alloc;
+ }
+
+ switch (port_type) {
+ case ICE_ESWITCH_BR_UPLINK_PORT:
+ ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
+ break;
+ case ICE_ESWITCH_BR_VF_REPR_PORT:
+ ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
+ vsi_idx);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_add_rule;
+ }
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.need_pass_l2 = true;
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ int err = -ENOMEM;
+ u16 lkups_cnt;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ goto err_exit;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ goto err_list_alloc;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.allow_pass_l2 = true;
+ rule_info.sw_act.vsi_handle = vsi_idx;
+ rule_info.sw_act.fltr_act = ICE_NOP;
+ rule_info.priority = 5;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+err_exit:
+ return ERR_PTR(err);
+}
+
+static struct ice_esw_br_flow *
+ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
+ int port_type, const unsigned char *mac, u16 vid)
+{
+ struct ice_rule_query_data *fwd_rule, *guard_rule;
+ struct ice_esw_br_flow *flow;
+ int err;
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
+ vid);
+ err = PTR_ERR_OR_ZERO(fwd_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_fwd_rule;
+ }
+
+ guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
+ err = PTR_ERR_OR_ZERO(guard_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_guard_rule;
+ }
+
+ flow->fwd_rule = fwd_rule;
+ flow->guard_rule = guard_rule;
+
+ return flow;
+
+err_guard_rule:
+ ice_eswitch_br_rule_delete(hw, fwd_rule);
+err_fwd_rule:
+ kfree(flow);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_esw_br_fdb_entry *
+ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
+ u16 vid)
+{
+ struct ice_esw_br_fdb_data data = {
+ .vid = vid,
+ };
+
+ ether_addr_copy(data.addr, mac);
+ return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
+ ice_fdb_ht_params);
+}
+
+static void
+ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
+ err);
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
+ err);
+
+ kfree(flow);
+}
+
+static struct ice_esw_br_vlan *
+ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port) {
+ dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan) {
+ dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
+ vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
+static void
+ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *fdb_entry)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+
+ rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ list_del(&fdb_entry->list);
+
+ ice_eswitch_br_flow_delete(pf, fdb_entry->flow);
+
+ kfree(fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
+ const unsigned char *mac, u16 vid,
+ unsigned long val)
+{
+ struct switchdev_notifier_fdb_info fdb_info = {
+ .addr = mac,
+ .vid = vid,
+ .offloaded = true,
+ };
+
+ call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
+}
+
+static void
+ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *entry)
+{
+ if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
+ ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
+ entry->data.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ ice_eswitch_br_fdb_entry_delete(bridge, entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (!fdb_entry) {
+ dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
+ mac, vid);
+ return;
+ }
+
+ trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
+ struct ice_esw_br_port *br_port,
+ bool added_by_user,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_esw_br *bridge = br_port->bridge;
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct ice_esw_br_flow *flow;
+ struct ice_esw_br_vlan *vlan;
+ struct ice_hw *hw = &pf->hw;
+ unsigned long event;
+ int err;
+
+ /* untagged filtering is not yet supported */
+ if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
+ return;
+
+ if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
+ vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
+ vid);
+ if (IS_ERR(vlan)) {
+ dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
+ PTR_ERR(vlan));
+ return;
+ }
+ }
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (fdb_entry)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
+ br_port->type, mac, vid);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto err_add_flow;
+ }
+
+ ether_addr_copy(fdb_entry->data.addr, mac);
+ fdb_entry->data.vid = vid;
+ fdb_entry->br_port = br_port;
+ fdb_entry->flow = flow;
+ fdb_entry->dev = netdev;
+ fdb_entry->last_use = jiffies;
+ event = SWITCHDEV_FDB_ADD_TO_BRIDGE;
+
+ if (added_by_user) {
+ fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
+ event = SWITCHDEV_FDB_OFFLOADED;
+ }
+
+ err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ if (err)
+ goto err_fdb_insert;
+
+ list_add(&fdb_entry->list, &bridge->fdb_list);
+ trace_ice_eswitch_br_fdb_entry_create(fdb_entry);
+
+ ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);
+
+ return;
+
+err_fdb_insert:
+ ice_eswitch_br_flow_delete(pf, flow);
+err_add_flow:
+ kfree(fdb_entry);
+err_exit:
+ dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
+}
+
+static void
+ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
+{
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+static void
+ice_eswitch_br_fdb_event_work(struct work_struct *work)
+{
+ struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
+ bool added_by_user = fdb_work->fdb_info.added_by_user;
+ const unsigned char *mac = fdb_work->fdb_info.addr;
+ u16 vid = fdb_work->fdb_info.vid;
+ struct ice_esw_br_port *br_port;
+
+ rtnl_lock();
+
+ br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
+ if (!br_port)
+ goto err_exit;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
+ added_by_user, mac, vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
+ mac, vid);
+ break;
+ default:
+ goto err_exit;
+ }
+
+err_exit:
+ rtnl_unlock();
+ dev_put(fdb_work->dev);
+ ice_eswitch_br_fdb_work_dealloc(fdb_work);
+}
+
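+/* Allocate the deferred work item in atomic (notifier) context. The
+ * MAC address is duplicated because the notifier data is only valid
+ * for the duration of the notifier call.
+ */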
+static struct ice_esw_br_fdb_work *
+ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
+ struct net_device *dev,
+ unsigned long event)
+{
+ struct ice_esw_br_fdb_work *work;
+ unsigned char *mac;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
+ memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
+
+ mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!mac) {
+ kfree(work);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ether_addr_copy(mac, fdb_info->addr);
+ work->fdb_info.addr = mac;
+ work->event = event;
+ work->dev = dev;
+
+ return work;
+}
+
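+/* Atomic switchdev notifier: validate the netdev and defer FDB
+ * add/del events to the ordered bridge workqueue.
+ */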
+static int
+ice_eswitch_br_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct ice_esw_br_fdb_work *work;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
+ extack = switchdev_notifier_info_to_extack(ptr);
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_netdev_to_port(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info, typeof(*fdb_info), info);
+
+ work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
+ if (IS_ERR(work)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
+ return notifier_from_errno(PTR_ERR(work));
+ }
+ dev_hold(dev);
+
+ queue_work(br_offloads->wq, &work->work);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+}
+
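+/* Toggling VLAN filtering invalidates the offloaded FDB entries,
+ * so flush them before updating the flag.
+ */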
+static void
+ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
+{
+ if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
+ return;
+
+ ice_eswitch_br_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
+ else
+ bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
+}
+
+static void
+ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+
+ vlan_ops->del_vlan(port->vsi, &port_vlan);
+ vlan_ops->clear_port_vlan(port->vsi);
+
+ ice_vf_vsi_disable_port_vlan(port->vsi);
+
+ port->pvid = 0;
+}
+
+static void
+ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_esw_br *bridge = port->bridge;
+
+ trace_ice_eswitch_br_vlan_cleanup(vlan);
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (vlan->vid == fdb_entry->data.vid)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ xa_erase(&port->vlans, vlan->vid);
+ if (port->pvid == vlan->vid)
+ ice_eswitch_br_clear_pvid(port);
+ kfree(vlan);
+}
+
+static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
+{
+ struct ice_esw_br_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
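+/* Configure a port VLAN (pvid) on the bridge port. Existing trunk
+ * VLANs are flushed, as they cannot coexist with a port VLAN.
+ */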
+static int
+ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ if (port->pvid == vlan->vid || vlan->vid == 1)
+ return 0;
+
+ /* Setting port vlan on uplink isn't supported by hw */
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ return -EOPNOTSUPP;
+
+ if (port->pvid) {
+ dev_info(dev,
+ "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
+ ice_vf_vsi_enable_port_vlan(port->vsi);
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+ err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ err = vlan_ops->add_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ ice_eswitch_br_port_vlans_flush(port);
+ port->pvid = vlan->vid;
+
+ return 0;
+}
+
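+/* Create a bridge VLAN entry; a VLAN marked both pvid and untagged
+ * is programmed as a port VLAN.
+ */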
+static struct ice_esw_br_vlan *
+ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
+{
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_esw_br_vlan *vlan;
+ int err;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ if ((flags & BRIDGE_VLAN_INFO_PVID) &&
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ err = ice_eswitch_br_set_pvid(port, vlan);
+ if (err)
+ goto err_set_pvid;
+ } else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
+ err = -EOPNOTSUPP;
+ goto err_set_pvid;
+ }
+
+ err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
+ if (err)
+ goto err_insert;
+
+ trace_ice_eswitch_br_vlan_create(vlan);
+
+ return vlan;
+
+err_insert:
+ if (port->pvid)
+ ice_eswitch_br_clear_pvid(port);
+err_set_pvid:
+ kfree(vlan);
+ return ERR_PTR(err);
+}
+
+static int
+ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return -EINVAL;
+
+ if (port->pvid) {
+ dev_info(ice_pf_to_dev(port->vsi->back),
+ "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+ }
+
+ vlan = ice_eswitch_br_vlan_create(vid, flags, port);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
+ vid, vsi_idx);
+ return PTR_ERR(vlan);
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return;
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan)
+ return;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static int
+ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+ int err;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = ice_eswitch_br_port_vlan_add(br_port->bridge,
+ br_port->vsi_idx, vlan->vid,
+ vlan->flags, extack);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
+ vlan->vid);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ice_eswitch_br_vlan_filtering_set(br_port->bridge,
+ attr->u.vlan_filtering);
+ return 0;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ br_port->bridge->ageing_time =
+ clock_t_to_jiffies(attr->u.ageing_time);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
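+/* Detach a port from the bridge: drop its FDB entries, clear the
+ * back-reference on the PF/representor, and flush its VLANs.
+ */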
+static void
+ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
+ struct ice_esw_br_port *br_port)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_vsi *vsi = br_port->vsi;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (br_port == fdb_entry->br_port)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ vsi->back->br_port = NULL;
+ else if (vsi->vf && vsi->vf->repr)
+ vsi->vf->repr->br_port = NULL;
+
+ xa_erase(&bridge->ports, br_port->vsi_idx);
+ ice_eswitch_br_port_vlans_flush(br_port);
+ kfree(br_port);
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_port_init(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init(&br_port->vlans);
+
+ br_port->bridge = bridge;
+
+ return br_port;
+}
+
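+/* Attach a VF port representor to the bridge as a bridge port */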
+static int
+ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
+ struct ice_repr *repr)
+{
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = repr->src_vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ repr->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
+ pf->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *port;
+ unsigned long i;
+
+ xa_for_each(&bridge->ports, i, port)
+ ice_eswitch_br_port_deinit(bridge, port);
+}
+
+static void
+ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ if (!bridge)
+ return;
+
+ /* Clean up all the ports that were added asynchronously
+ * through the NETDEV_CHANGEUPPER event.
+ */
+ ice_eswitch_br_ports_flush(bridge);
+ WARN_ON(!xa_empty(&bridge->ports));
+ xa_destroy(&bridge->ports);
+ rhashtable_destroy(&bridge->fdb_ht);
+
+ br_offloads->bridge = NULL;
+ kfree(bridge);
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ INIT_LIST_HEAD(&bridge->fdb_list);
+ bridge->br_offloads = br_offloads;
+ bridge->ifindex = ifindex;
+ bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ xa_init(&bridge->ports);
+ br_offloads->bridge = bridge;
+
+ return bridge;
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+
+ if (bridge) {
+ if (bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one bridge is supported per eswitch");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return bridge;
+ }
+
+ /* Create the bridge if it doesn't exist yet */
+ bridge = ice_eswitch_br_init(br_offloads, ifindex);
+ if (IS_ERR(bridge))
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");
+
+ return bridge;
+}
+
+static void
+ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ /* Remove the bridge if it exists and there are no ports left */
+ if (!bridge || !xa_empty(&bridge->ports))
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, bridge);
+}
+
+static int
+ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
+ struct ice_esw_br *bridge;
+
+ if (!br_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is not attached to any bridge");
+ return -EINVAL;
+ }
+
+ if (br_port->bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is attached to another bridge");
+ return -EINVAL;
+ }
+
+ bridge = br_port->bridge;
+
+ trace_ice_eswitch_br_port_unlink(br_port);
+ ice_eswitch_br_port_deinit(br_port->bridge, br_port);
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ if (ice_eswitch_br_netdev_to_port(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port is already attached to the bridge");
+ return -EINVAL;
+ }
+
+ bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
+ trace_ice_eswitch_br_port_link(repr->br_port);
+ } else {
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ err = ice_eswitch_br_uplink_port_init(bridge, pf);
+ trace_ice_eswitch_br_port_link(pf->br_port);
+ }
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
+ goto err_port_init;
+ }
+
+ return 0;
+
+err_port_init:
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+ return err;
+}
+
+static int
+ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return 0;
+
+ upper = info->upper_dev;
+ if (!netif_is_bridge_master(upper))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (info->linking)
+ return ice_eswitch_br_port_link(br_offloads, dev,
+ upper->ifindex, extack);
+ else
+ return ice_eswitch_br_port_unlink(br_offloads, dev,
+ upper->ifindex, extack);
+}
+
+static int
+ice_eswitch_br_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = ice_eswitch_br_port_changeupper(nb, ptr);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+
+ ASSERT_RTNL();
+
+ if (!br_offloads)
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
+
+ pf->switchdev.br_offloads = NULL;
+ kfree(br_offloads);
+}
+
+static struct ice_esw_br_offloads *
+ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ ASSERT_RTNL();
+
+ if (pf->switchdev.br_offloads)
+ return ERR_PTR(-EEXIST);
+
+ br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+ pf->switchdev.br_offloads = br_offloads;
+ br_offloads->pf = pf;
+
+ return br_offloads;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = pf->switchdev.br_offloads;
+ if (!br_offloads)
+ return;
+
+ cancel_delayed_work_sync(&br_offloads->update_work);
+ unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+ destroy_workqueue(br_offloads->wq);
+ /* Although the notifier blocks were unregistered just above,
+ * so no new events will arrive, some events might already
+ * be in progress. Hold the rtnl lock and wait for them
+ * to finish.
+ */
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+}
+
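+/* Age out dynamic FDB entries that have not been used within the
+ * bridge ageing time; user-added entries are never aged out.
+ */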
+static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ if (!bridge)
+ return;
+
+ rtnl_lock();
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
+ continue;
+
+ if (time_is_after_eq_jiffies(entry->last_use +
+ bridge->ageing_time))
+ continue;
+
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+ }
+ rtnl_unlock();
+}
+
+static void ice_eswitch_br_update_work(struct work_struct *work)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = ice_work_to_br_offloads(work);
+
+ ice_eswitch_br_update(br_offloads);
+
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+}
+
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ rtnl_lock();
+ br_offloads = ice_eswitch_br_offloads_alloc(pf);
+ rtnl_unlock();
+ if (IS_ERR(br_offloads)) {
+ dev_err(dev, "Failed to init eswitch bridge\n");
+ return PTR_ERR(br_offloads);
+ }
+
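+ /* FDB events must be processed in the order they arrive,
+ * hence an ordered workqueue
+ */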
+ br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
+ if (!br_offloads->wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate bridge workqueue\n");
+ goto err_alloc_wq;
+ }
+
+ br_offloads->switchdev_nb.notifier_call =
+ ice_eswitch_br_switchdev_event;
+ err = register_switchdev_notifier(&br_offloads->switchdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register switchdev notifier\n");
+ goto err_reg_switchdev_nb;
+ }
+
+ br_offloads->switchdev_blk.notifier_call =
+ ice_eswitch_br_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge blocking switchdev notifier\n");
+ goto err_reg_switchdev_blk;
+ }
+
+ br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
+ err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge port event notifier\n");
+ goto err_reg_netdev_nb;
+ }
+
+ INIT_DELAYED_WORK(&br_offloads->update_work,
+ ice_eswitch_br_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+
+ return 0;
+
+err_reg_netdev_nb:
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+err_reg_switchdev_blk:
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+err_reg_switchdev_nb:
+ destroy_workqueue(br_offloads->wq);
+err_alloc_wq:
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
new file mode 100644
index 000000000000..85a8fadb2928
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_BR_H_
+#define _ICE_ESWITCH_BR_H_
+
+#include <linux/rhashtable.h>
+#include <linux/workqueue.h>
+
+struct ice_esw_br_fdb_data {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct ice_esw_br_flow {
+ struct ice_rule_query_data *fwd_rule;
+ struct ice_rule_query_data *guard_rule;
+};
+
+enum {
+ ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0),
+};
+
+struct ice_esw_br_fdb_entry {
+ struct ice_esw_br_fdb_data data;
+ struct rhash_head ht_node;
+ struct list_head list;
+
+ int flags;
+
+ struct net_device *dev;
+ struct ice_esw_br_port *br_port;
+ struct ice_esw_br_flow *flow;
+
+ unsigned long last_use;
+};
+
+enum ice_esw_br_port_type {
+ ICE_ESWITCH_BR_UPLINK_PORT = 0,
+ ICE_ESWITCH_BR_VF_REPR_PORT = 1,
+};
+
+struct ice_esw_br_port {
+ struct ice_esw_br *bridge;
+ struct ice_vsi *vsi;
+ enum ice_esw_br_port_type type;
+ u16 vsi_idx;
+ u16 pvid;
+ struct xarray vlans;
+};
+
+enum {
+ ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0),
+};
+
+struct ice_esw_br {
+ struct ice_esw_br_offloads *br_offloads;
+ struct xarray ports;
+
+ struct rhashtable fdb_ht;
+ struct list_head fdb_list;
+
+ int ifindex;
+ u32 flags;
+ unsigned long ageing_time;
+};
+
+struct ice_esw_br_offloads {
+ struct ice_pf *pf;
+ struct ice_esw_br *bridge;
+ struct notifier_block netdev_nb;
+ struct notifier_block switchdev_blk;
+ struct notifier_block switchdev_nb;
+
+ struct workqueue_struct *wq;
+ struct delayed_work update_work;
+};
+
+struct ice_esw_br_fdb_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct ice_esw_br_vlan {
+ u16 vid;
+ u16 flags;
+};
+
+#define ice_nb_to_br_offloads(nb, nb_name) \
+ container_of(nb, \
+ struct ice_esw_br_offloads, \
+ nb_name)
+
+#define ice_work_to_br_offloads(w) \
+ container_of(w, \
+ struct ice_esw_br_offloads, \
+ update_work.work)
+
+#define ice_work_to_fdb_work(w) \
+ container_of(w, \
+ struct ice_esw_br_fdb_work, \
+ work)
+
+static inline bool ice_eswitch_br_is_vid_valid(u16 vid)
+{
+ /* In trunk VLAN mode, for untagged traffic the bridge sends requests
+ * to offload VLAN 1 with pvid and untagged flags set. Since these
+ * flags are not supported, add a MAC filter instead.
+ */
+ return vid > 1;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf);
+
+#endif /* _ICE_ESWITCH_BR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0054d7e64ec3..a43c23c80565 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -4076,3 +4076,28 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
+
+/**
+ * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+ * @vsi: pointer to VSI structure
+ * @set: set or unset the bit
+ */
+int
+ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
+{
+ struct ice_vsi_ctx ctx = {
+ .info = vsi->info,
+ };
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ if (set)
+ ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+ else
+ ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e985766e6bb5..1628385a9672 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -157,6 +157,7 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f02d44455772..9b36cce306b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -80,7 +80,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
-bool netif_is_ice(struct net_device *dev)
+bool netif_is_ice(const struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
}
@@ -3392,6 +3392,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}
/**
@@ -5703,7 +5704,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (!vsi)
+ if (!vsi || ice_is_switchdev_running(vsi->back))
return;
/* Set the flags to synchronize filters
@@ -6255,7 +6256,7 @@ static void ice_tx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
@@ -6275,7 +6276,7 @@ static void ice_rx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index e30e12321abd..c686ac0935eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -254,7 +254,7 @@ static const struct net_device_ops ice_repr_netdev_ops = {
* ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
* @netdev: pointer to netdev
*/
-bool ice_is_port_repr_netdev(struct net_device *netdev)
+bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 9c2a6f496b3b..e1ee2d2c1d2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -12,6 +12,7 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+ struct ice_esw_br_port *br_port;
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
@@ -27,5 +28,5 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
-bool ice_is_port_repr_netdev(struct net_device *netdev);
+bool ice_is_port_repr_netdev(const struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6db4ca7978cb..813fef5fd748 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2272,6 +2272,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Propagate some data to the recipe database */
recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
recps[idx].chain_idx = root_bufs.content.result_indx &
@@ -4613,13 +4617,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
- * @tun_type: type of recipe tunnel
+ * @rinfo: information regarding the rule e.g. priority and action info
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- enum ice_sw_tunnel_type tun_type)
+ const struct ice_adv_rule_info *rinfo)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4680,9 +4684,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
- * Also tun type of recipe needs to be checked
+ * Also tun type and *_pass_l2 of recipe need to be
+ * checked
*/
- if (found && recp[i].tun_type == tun_type)
+ if (found && recp[i].tun_type == rinfo->tun_type &&
+ recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
return i; /* Return the recipe ID */
}
}
@@ -4952,6 +4959,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_content *content;
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
@@ -5012,6 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
/* Clear the result index of the located recipe, as this will be
* updated, if needed, later in the recipe creation process.
*/
@@ -5022,26 +5032,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
/* if the recipe is a non-root recipe RID should be programmed
* as 0 for the rules to be applied correctly.
*/
- buf[recps].content.rid = 0;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ content->rid = 0;
+ memset(&content->lkup_indx, 0,
+ sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
* to be 0
*/
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] = 0x80;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = 0x80;
+ content->mask[i] = 0;
}
for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
- buf[recps].content.mask[i + 1] =
- cpu_to_le16(entry->fv_mask[i]);
+ content->lkup_indx[i + 1] = entry->fv_idx[i];
+ content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
}
if (rm->n_grp_count > 1) {
@@ -5055,7 +5063,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
entry->chain_idx = chain_idx;
- buf[recps].content.result_indx =
+ content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
ICE_AQ_RECIPE_RESULT_DATA_M);
@@ -5069,7 +5077,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
ICE_MAX_NUM_RECIPES);
set_bit(buf[recps].recipe_indx,
(unsigned long *)buf[recps].recipe_bitmap);
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
+
+ if (rm->need_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+
+ if (rm->allow_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
recps++;
}
@@ -5107,9 +5121,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
buf[recps].recipe_indx = (u8)rid;
- buf[recps].content.rid = (u8)rid;
- buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ content->rid = (u8)rid;
+ content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
/* the new entry created should also be part of rg_list to
* make sure we have complete recipe
*/
@@ -5121,16 +5137,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
goto err_unroll;
}
last_chain_entry->rid = rid;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] =
- ICE_AQ_RECIPE_LKUP_IGNORE;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ content->mask[i] = 0;
}
i = 1;
@@ -5142,8 +5155,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
list_for_each_entry(entry, &rm->rg_list, l_entry) {
last_chain_entry->fv_idx[i] = entry->chain_idx;
- buf[recps].content.lkup_indx[i] = entry->chain_idx;
- buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ content->lkup_indx[i] = entry->chain_idx;
+ content->mask[i++] = cpu_to_le16(0xFFFF);
set_bit(entry->rid, rm->r_bitmap);
}
list_add(&last_chain_entry->l_entry, &rm->rg_list);
@@ -5155,7 +5168,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
status = -EINVAL;
goto err_unroll;
}
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
recps++;
rm->root_rid = (u8)rid;
@@ -5220,6 +5233,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
recp->n_grp_count = rm->n_grp_count;
recp->tun_type = rm->tun_type;
+ recp->need_pass_l2 = rm->need_pass_l2;
+ recp->allow_pass_l2 = rm->allow_pass_l2;
recp->recp_created = true;
}
rm->root_buf = buf;
@@ -5388,6 +5403,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
+ rm->need_pass_l2 = rinfo->need_pass_l2;
+ rm->allow_pass_l2 = rinfo->allow_pass_l2;
+
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
@@ -5403,7 +5421,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if found a recipe that match the existing criteria */
goto err_unroll;
@@ -5839,7 +5857,9 @@ static bool ice_rules_equal(const struct ice_adv_rule_info *first,
return first->sw_act.flag == second->sw_act.flag &&
first->tun_type == second->tun_type &&
first->vlan_type == second->vlan_type &&
- first->src_vsi == second->src_vsi;
+ first->src_vsi == second->src_vsi &&
+ first->need_pass_l2 == second->need_pass_l2 &&
+ first->allow_pass_l2 == second->allow_pass_l2;
}
/**
@@ -6078,7 +6098,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
}
@@ -6089,7 +6110,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto free_pkt_profile;
}
- if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_NOP)
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -6159,6 +6181,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_NOP:
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ act &= ~ICE_SINGLE_ACT_VALID_BIT;
+ break;
default:
status = -EIO;
goto err_ice_add_adv_rule;
@@ -6439,7 +6466,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6533,59 +6560,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
/**
- * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
- * given VSI handle
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
- *
- * This function is used to remove all the rules for a given VSI and as soon
- * as removing a rule fails, it will return immediately with the error code,
- * else it will return success.
- */
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
- struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
- struct ice_vsi_list_map_info *map_info;
- struct ice_adv_rule_info rinfo;
- struct list_head *list_head;
- struct ice_switch_info *sw;
- int status;
- u8 rid;
-
- sw = hw->switch_info;
- for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
- if (!sw->recp_list[rid].recp_created)
- continue;
- if (!sw->recp_list[rid].adv_rule)
- continue;
-
- list_head = &sw->recp_list[rid].filt_rules;
- list_for_each_entry_safe(list_itr, tmp_entry, list_head,
- list_entry) {
- rinfo = list_itr->rule_info;
-
- if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
- map_info = list_itr->vsi_list_info;
- if (!map_info)
- continue;
-
- if (!test_bit(vsi_handle, map_info->vsi_map))
- continue;
- } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
- continue;
- }
-
- rinfo.sw_act.vsi_handle = vsi_handle;
- status = ice_rem_adv_rule(hw, list_itr->lkups,
- list_itr->lkups_cnt, &rinfo);
- if (status)
- return status;
- }
- }
- return 0;
-}
-
-/**
* ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c84b56fe84a5..10f5a0ae199e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -191,6 +191,8 @@ struct ice_adv_rule_info {
u16 vlan_type;
u16 fltr_rule_id;
u32 priority;
+ u16 need_pass_l2:1;
+ u16 allow_pass_l2:1;
u16 src_vsi;
struct ice_sw_act_ctrl sw_act;
struct ice_adv_rule_flags_info flags_info;
@@ -254,6 +256,9 @@ struct ice_sw_recipe {
*/
u8 priority;
+ u8 need_pass_l2:1;
+ u8 allow_pass_l2:1;
+
struct list_head rg_list;
/* AQ buffer associated with this recipe */
@@ -379,7 +384,6 @@ int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index ae98d5a8ff60..b2f5c9fe0149 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -21,6 +21,7 @@
#define _ICE_TRACE_H_
#include <linux/tracepoint.h>
+#include "ice_eswitch_br.h"
/* ice_trace() macro enables shared code to refer to trace points
* like:
@@ -240,6 +241,95 @@ DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+DECLARE_EVENT_CLASS(ice_esw_br_fdb_template,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb),
+ TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(int, flags)),
+ TP_fast_assign(strscpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->data.addr, ETH_ALEN);
+ __entry->vid = fdb->data.vid;
+ __entry->flags = fdb->flags;),
+ TP_printk("net_device=%s addr=%pM vid=%u flags=%x",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_create,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_find_and_delete,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DECLARE_EVENT_CLASS(ice_esw_br_vlan_template,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan),
+ TP_STRUCT__entry(__field(u16, vid)
+ __field(u16, flags)),
+ TP_fast_assign(__entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;),
+ TP_printk("vid=%u flags=%x",
+ __entry->vid,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_create,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_cleanup,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+#define ICE_ESW_BR_PORT_NAME_L 16
+
+DECLARE_EVENT_CLASS(ice_esw_br_port_template,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port),
+ TP_STRUCT__entry(__field(u16, vport_num)
+ __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)),
+ TP_fast_assign(__entry->vport_num = port->vsi_idx;
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ strscpy(__entry->port_type,
+ "Uplink",
+ ICE_ESW_BR_PORT_NAME_L);
+ else
+ strscpy(__entry->port_type,
+ "VF Representor",
+ ICE_ESW_BR_PORT_NAME_L);),
+ TP_printk("vport_num=%u port type=%s",
+ __entry->vport_num,
+ __entry->port_type)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_link,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_unlink,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a09556e57803..df9171a1a34f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1033,6 +1033,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_NOP,
ICE_INVAL_ACT
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index b1ffb81893d4..d7b10dc67f03 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -21,6 +21,99 @@ noop_vlan(struct ice_vsi __always_unused *vsi)
return 0;
}
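+/* Install the VLAN ops used while a port VLAN is configured on the VSI */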
+static void ice_port_vlan_on(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+ }
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+}
+
+static void ice_port_vlan_off(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ } else {
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ }
+
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+}
+
+/**
+ * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function does not create the port VLAN itself; it only sets up
+ * the VLAN ops so that a port VLAN can be created on the VF VSI.
+ */
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_on(vsi);
+}
+
+/**
+ * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function should be called after the port VLAN has been removed
+ * from the VSI (using the VLAN ops).
+ */
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_off(vsi);
+}
+
/**
* ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
* @vsi: VF's VSI being configured
@@ -39,91 +132,18 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
if (WARN_ON(!vf))
return;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
+ if (ice_vf_is_port_vlan_ena(vf))
+ ice_port_vlan_on(vsi);
+ else
+ ice_port_vlan_off(vsi);
- /* outer VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
- vlan_ops->add_vlan = noop_vlan_arg;
- vlan_ops->del_vlan = noop_vlan_arg;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
-
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
-
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops = ice_is_dvm_ena(&pf->hw) ?
+ &vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
- /* inner VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- }
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
index 875a4e615f39..df8aa09df3e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -13,7 +13,11 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
#ifdef CONFIG_PCI_IOV
void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi);
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi);
#else
static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { }
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5b4a0abb4607..76266e709a39 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -202,6 +202,24 @@ int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
return ice_vsi_manage_vlan_insertion(vsi);
}
+static void
+ice_save_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ vlan->sw_flags2 = info->sw_flags2;
+ vlan->inner_vlan_flags = info->inner_vlan_flags;
+ vlan->outer_vlan_flags = info->outer_vlan_flags;
+}
+
+static void
+ice_restore_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ info->sw_flags2 = vlan->sw_flags2;
+ info->inner_vlan_flags = vlan->inner_vlan_flags;
+ info->outer_vlan_flags = vlan->outer_vlan_flags;
+}
+
/**
* __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
* @vsi: the VSI to update
@@ -218,6 +236,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
info = &ctxt->info;
info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
@@ -259,6 +278,33 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
}
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_inner_vlan = 0;
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret)
+ dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return ret;
+}
+
/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
@@ -647,6 +693,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
@@ -689,9 +736,6 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
* used if DVM is supported. Also, this function should never be called directly
* as it should be part of ice_vsi_vlan_ops if it's needed.
*
- * This function does not support clearing the port VLAN as there is currently
- * no use case for this.
- *
* Use the ice_vlan structure passed in to set this VSI in a port VLAN.
*/
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
@@ -705,3 +749,37 @@ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
}
+
+/**
+ * ice_vsi_clear_outer_port_vlan - clear outer port vlan
+ * @vsi: VSI to configure
+ *
+ * The function restores the VLAN configuration that was saved in
+ * vsi->vlan_info when the port VLAN was configured.
+ */
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_outer_vlan = 0;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
index f459909490ec..f0d84d11bd5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -7,6 +7,12 @@
#include <linux/types.h>
#include "ice_vlan.h"
+struct ice_vsi_vlan_info {
+ u8 sw_flags2;
+ u8 inner_vlan_flags;
+ u8 outer_vlan_flags;
+};
+
struct ice_vsi;
int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
@@ -17,6 +23,7 @@ int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi);
int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
@@ -28,5 +35,6 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
index 5b47568f6256..b2d2330dedcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -21,6 +21,7 @@ struct ice_vsi_vlan_ops {
int (*ena_tx_filtering)(struct ice_vsi *vsi);
int (*dis_tx_filtering)(struct ice_vsi *vsi);
int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*clear_port_vlan)(struct ice_vsi *vsi);
};
void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a7fe2b4ce655..2a3f0834e139 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -546,19 +546,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
/**
- * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
- * @rx_ring: Rx ring
- */
-static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
-{
- int ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @xdp: Pointer to XDP buffer
@@ -572,8 +559,14 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo = NULL;
struct sk_buff *skb;
+ u32 nr_frags = 0;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
net_prefetch(xdp->data_meta);
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
@@ -589,6 +582,29 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
__skb_pull(skb, metasize);
}
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (int i = 0; i < nr_frags; i++) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct page *page;
+ void *addr;
+
+ page = dev_alloc_page();
+ if (!page) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ addr = page_to_virt(page);
+
+		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));
+
+		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+					   page, 0, skb_frag_size(frag));
+ }
+
+out:
xsk_buff_free(xdp);
return skb;
}
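
A note on the fragment copy above, as a hedged reading of the patch rather than part of it:

/* Assumed rationale: the frag pages still belong to the AF_XDP umem, which
 * is recycled as soon as xsk_buff_free(xdp) runs at the end of
 * ice_construct_skb_zc(), so each fragment's payload is duplicated into a
 * freshly allocated kernel page (dev_alloc_page()/page_to_virt()) before
 * the skb leaves the driver.
 */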
@@ -597,7 +613,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
*/
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -619,7 +635,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
}
if (!completed_frames)
- return;
+ return 0;
if (likely(!xdp_ring->xdp_tx_active)) {
xsk_frames = completed_frames;
@@ -649,6 +665,8 @@ skip:
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+
+ return completed_frames;
}
/**
@@ -666,37 +684,72 @@ skip:
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
struct ice_tx_ring *xdp_ring)
{
+ struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
-
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
- ice_clean_xdp_irq_zc(xdp_ring);
- if (!ICE_DESC_UNUSED(xdp_ring)) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ struct xdp_buff *head;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq_zc(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
}
- dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
-
- tx_buf = &xdp_ring->tx_buf[ntu];
- tx_buf->xdp = xdp;
- tx_buf->type = ICE_TX_BUF_XSK_TX;
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
- 0, size, 0);
- xdp_ring->xdp_tx_active++;
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ head = xdp;
+
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
+ /* account for each xdp_buff from xsk_buff_pool */
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ xdp = xsk_buff_get_frag(head);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
+ }
- if (++ntu == xdp_ring->count)
- ntu = 0;
xdp_ring->next_to_use = ntu;
+	/* mark the frame's last descriptor with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
return ICE_XDP_TX;
+
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+
+ return ICE_XDP_CONSUMED;
}
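
The loop above writes every descriptor of a multi-buffer frame with a zero command field and only flags the final one with EOP. A standalone sketch of that pattern; toy_fill_descs() is hypothetical, while ice_build_ctob(), ICE_TX_DESC_CMD_EOP and ICE_TXD_QW1_CMD_S are the driver's own:

/* Hedged sketch: chain n buffers of one frame, EOP only on the last. */
static void toy_fill_descs(struct ice_tx_desc *descs, const dma_addr_t *dma,
			   const u32 *len, u32 n)
{
	struct ice_tx_desc *last = NULL;
	u32 i;

	if (!n)
		return;

	for (i = 0; i < n; i++) {
		descs[i].buf_addr = cpu_to_le64(dma[i]);
		/* command field left at 0: not yet end of packet */
		descs[i].cmd_type_offset_bsz = ice_build_ctob(0, 0, len[i], 0);
		last = &descs[i];
	}

	/* hardware treats desc 0..n-2 as continuations; n-1 closes the frame */
	last->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
}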
/**
@@ -752,6 +805,34 @@ out_failure:
return result;
}
+static int
+ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ struct xdp_buff *xdp, const unsigned int size)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+ if (!size)
+ return 0;
+
+ if (!xdp_buff_has_frags(first)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(first);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ xsk_buff_free(first);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+ virt_to_page(xdp->data_hard_start), 0, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+ return 0;
+}
+
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -762,9 +843,14 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 ntu = rx_ring->next_to_use;
+ struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
+ u32 cnt = rx_ring->count;
bool failure = false;
int entries_to_alloc;
@@ -774,6 +860,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
+ if (ntc != rx_ring->first_desc)
+ first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -783,7 +872,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u16 rx_ptype;
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -795,51 +884,61 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
- if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+ if (unlikely(ntc == ntu))
break;
- xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+ xdp = *ice_xdp_buf(rx_ring, ntc);
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size) {
- xdp->data = NULL;
- xdp->data_end = NULL;
- xdp->data_hard_start = NULL;
- xdp->data_meta = NULL;
- goto construct_skb;
- }
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+
+ if (!first) {
+ first = xdp;
+ xdp_buff_clear_frags_flag(first);
+ } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ break;
+ }
+
+ if (++ntc == cnt)
+ ntc = 0;
- xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
+ first = NULL;
+ rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
- xsk_buff_free(xdp);
+ xsk_buff_free(first);
} else if (xdp_res == ICE_XDP_PASS) {
goto construct_skb;
}
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(first);
total_rx_packets++;
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, xdp);
+ skb = ice_construct_skb_zc(rx_ring, first);
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
if (eth_skb_pad(skb)) {
skb = NULL;
@@ -858,18 +957,22 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ rx_ring->next_to_clean = ntc;
+ entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+		/* ntu could have changed when allocating entries above, so
+		 * use the rx_ring value instead of the stack-based one
+		 */
+ if (failure || ntc == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
+ xsk_clear_rx_need_wakeup(xsk_pool);
return (int)total_rx_packets;
}
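
Two pieces of state make the reworked Rx loop above resumable across polls; a summary of the assumed invariants, not part of the patch:

/* Assumed invariants:
 *   first                - head xdp_buff of the frame currently being
 *                          assembled; NULL between frames, with subsequent
 *                          buffers attached via ice_add_xsk_frag()
 *   rx_ring->first_desc  - ring index of that head buffer, so a pass that
 *                          stops mid-frame (budget exhausted, ICE_XDP_EXIT)
 *                          resumes at the frame start on the next poll
 * A descriptor with EOP set (ice_is_non_eop() returns false) completes the
 * frame and resets both.
 */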
@@ -894,7 +997,7 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
0, desc->len, 0);
*total_bytes += desc->len;
@@ -921,7 +1024,7 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
0, descs[i].len, 0);
*total_bytes += descs[i].len;
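
Both Tx paths above now take EOP from the AF_XDP descriptor itself. Since ICE_TX_DESC_CMD_EOP is bit 0, the helper's boolean result can be fed straight into ice_build_ctob(); its presumed shape (the in-tree signature may differ slightly):

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	/* a descriptor ends a frame unless userspace marked it continued */
	return !(desc->options & XDP_PKT_CONTD);
}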
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index bdeb36790d77..2a10254edbbd 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6143,6 +6143,26 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
return 0;
}
+static void igc_taprio_stats(struct net_device *dev,
+ struct tc_taprio_qopt_stats *stats)
+{
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
+static void igc_taprio_queue_stats(struct net_device *dev,
+ struct tc_taprio_qopt_queue_stats *queue_stats)
+{
+ struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
+
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
@@ -6153,11 +6173,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- if (qopt->cmd == TAPRIO_CMD_DESTROY)
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ break;
+ case TAPRIO_CMD_DESTROY:
return igc_tsn_clear_schedule(adapter);
-
- if (qopt->cmd != TAPRIO_CMD_REPLACE)
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
return -EOPNOTSUPP;
+ }
if (qopt->base_time < 0)
return -ERANGE;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 75e83ea2a926..0f9bc4f8ec3b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -593,8 +593,6 @@ static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
sprintf(c2_entry_name, "%03d", id);
c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
- if (!c2_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->c2_entries[id];
@@ -626,8 +624,6 @@ static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
sprintf(flow_tbl_entry_name, "%03d", id);
flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
- if (!flow_tbl_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->flt_entries[id];
@@ -646,12 +642,8 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
cls_dir = debugfs_create_dir("classifier", parent);
- if (!cls_dir)
- return -ENOMEM;
c2_dir = debugfs_create_dir("c2", cls_dir);
- if (!c2_dir)
- return -ENOMEM;
for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
@@ -660,8 +652,6 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
}
flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
- if (!flow_tbl_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
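
The dropped NULL checks were dead code: debugfs_create_dir() never returns NULL — on failure it returns an ERR_PTR, which later debugfs_create_*() calls accept and silently ignore, so callers are expected not to bail out. A hypothetical illustration of the convention (all names here are made up):

static u32 example_value;

static void example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* no NULL/IS_ERR() check needed: an ERR_PTR parent is accepted
	 * and ignored by subsequent debugfs_create_*() calls
	 */
	debugfs_create_u32("value", 0444, dir, &example_value);
}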
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eba307eee2b2..ed66c5989102 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -235,7 +235,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
- npc_delete_flow_req, msg_rsp) \
+ npc_delete_flow_req, npc_delete_flow_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
@@ -1491,6 +1491,8 @@ struct npc_install_flow_req {
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
+ /* old counter value */
+ u16 cntr_val;
};
struct npc_install_flow_rsp {
@@ -1506,6 +1508,11 @@ struct npc_delete_flow_req {
u8 all; /* PF + VFs */
};
+struct npc_delete_flow_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr_val;
+};
+
struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 952319453701..9c365cc3e736 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1192,7 +1192,7 @@ find_rule:
write_req.enable_entry = (u8)enable;
/* if counter is available then clear and use it */
if (req->set_cntr && rule->has_cntr) {
- rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
write_req.set_cntr = 1;
write_req.cntr = rule->cntr;
}
@@ -1407,12 +1407,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
struct npc_delete_flow_req *req,
- struct msg_rsp *rsp)
+ struct npc_delete_flow_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule *iter, *tmp;
u16 pcifunc = req->hdr.pcifunc;
struct list_head del_list;
+ int blkaddr;
INIT_LIST_HEAD(&del_list);
@@ -1428,6 +1429,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
list_move_tail(&iter->list, &del_list);
/* single rule */
} else if (req->entry == iter->entry) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr)
+ rsp->cntr_val = rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(iter->cntr));
list_move_tail(&iter->list, &del_list);
break;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 592b317f4637..854045ed3b06 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -158,6 +158,7 @@ void rvu_switch_enable(struct rvu *rvu)
struct npc_mcam_alloc_entry_req alloc_req = { 0 };
struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct msg_rsp rsp;
@@ -197,7 +198,7 @@ void rvu_switch_enable(struct rvu *rvu)
uninstall_rules:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
kfree(rswitch->entry2pcifunc);
free_entries:
free_req.all = 1;
@@ -209,6 +210,7 @@ exit:
void rvu_switch_disable(struct rvu *rvu)
{
struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct rvu_hwinfo *hw = rvu->hw;
@@ -250,7 +252,7 @@ void rvu_switch_disable(struct rvu *rvu)
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
rswitch->used_entries = 0;
kfree(rswitch->entry2pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 77c8f650f7ac..8cdd92dd9762 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -774,6 +774,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
rsp->schq_list[lvl][schq];
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ba8091131ec0..25e99fd2e3fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -224,6 +224,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
+ u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -360,13 +361,8 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
-};
-
-struct otx2_tc_info {
- /* hash table to store TC offloaded flows */
- struct rhashtable flow_table;
- struct rhashtable_params flow_ht_params;
- unsigned long *tc_entries_bitmap;
+ struct list_head flow_list_tc;
+ bool ntuple;
};
struct dev_hw_ops {
@@ -491,7 +487,6 @@ struct otx2_nic {
/* NPC MCAM */
struct otx2_flow_config *flow_cfg;
struct otx2_mac_table *mac_table;
- struct otx2_tc_info tc_info;
u64 reset_count;
struct work_struct reset_task;
@@ -1063,7 +1058,6 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 63ef7c41d18d..4e1130496573 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
return 0;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
- otx2_tc_alloc_ent_bitmap(pfvf);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index c47d91da32dc..9efcec549834 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -764,6 +764,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
+ pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 2d7713a1a153..4762dbea64a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
+ INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
flow_cfg->max_flows = 0;
return 0;
@@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return -ENOMEM;
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 5e56b6c3e60a..1e6fc23eca4f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -34,9 +34,8 @@ struct otx2_tc_flow_stats {
};
struct otx2_tc_flow {
- struct rhash_head node;
+ struct list_head list;
unsigned long cookie;
- unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
@@ -44,31 +43,10 @@ struct otx2_tc_flow {
u16 entry;
u16 leaf_profile;
bool is_act_police;
+ u32 prio;
+ struct npc_install_flow_req req;
};
-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
-{
- struct otx2_tc_info *tc = &nic->tc_info;
-
- if (!nic->flow_cfg->max_flows)
- return 0;
-
- /* Max flows changed, free the existing bitmap */
- kfree(tc->tc_entries_bitmap);
-
- tc->tc_entries_bitmap =
- kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
- sizeof(long), GFP_KERNEL);
- if (!tc->tc_entries_bitmap) {
- netdev_err(nic->netdev,
- "Unable to alloc TC flow entries bitmap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_exp, u32 *burst_mantissa)
{
@@ -707,8 +685,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
-static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_tc_flow *iter, *tmp;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
+ unsigned long cookie)
+{
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
+ int index)
{
+ struct otx2_tc_flow *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (i == index)
+ return tmp;
+ i++;
+ }
+
+ return NULL;
+}
+
+static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&node->list);
+ return;
+ }
+ }
+}
+
+static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int index = 0;
+
+ /* If the flow list is empty then add the new node */
+ if (list_empty(&flow_cfg->flow_list_tc)) {
+ list_add(&node->list, &flow_cfg->flow_list_tc);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node->prio < tmp->prio)
+ break;
+ index++;
+ }
+
+ list_add(&node->list, pos->prev);
+ return index;
+}
+
+static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
+{
+ struct npc_install_flow_req *tmp_req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!tmp_req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+{
+ struct npc_delete_flow_rsp *rsp;
struct npc_delete_flow_req *req;
int err;
@@ -729,22 +816,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
+
+ if (cntr_val) {
+ rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ *cntr_val = rsp->cntr_val;
+ }
+
mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
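
Together, the two helpers above let a rule keep its hit counter while being moved between MCAM slots: the delete response now carries cntr_val, and the install request re-seeds the counter with it instead of zeroing it. A hedged sketch of the round-trip; move_rule() is hypothetical:

static int move_rule(struct otx2_nic *nic, struct npc_install_flow_req *req,
		     u16 old_entry, u16 new_entry)
{
	u16 cntr_val = 0;
	int err;

	/* delete returns the rule's current hardware hit count */
	err = otx2_del_mcam_flow_entry(nic, old_entry, &cntr_val);
	if (err)
		return err;

	/* re-install at the new slot, seeding the counter with the old
	 * value (see the rvu_npc_fs.c change earlier in this series)
	 */
	req->entry = new_entry;
	req->cntr_val = cntr_val;
	return otx2_add_mcam_flow_entry(nic, req);
}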
+static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int i = 0, index = 0;
+ u16 cntr_val;
+
+	/* Find and delete the entry from the list, then re-install all
+	 * entries from the start of the list up to the deleted entry's
+	 * position at the next higher MCAM indexes.
+	 */
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&tmp->list);
+ break;
+ }
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry++;
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ index++;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ if (i == index)
+ break;
+
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ i++;
+ }
return 0;
}
+static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
+ struct otx2_tc_flow *tmp;
+ int list_idx, i;
+ u16 cntr_val;
+
+	/* Find the index (list_idx) of the first entry whose priority is
+	 * greater than the new entry's, then re-install all entries from
+	 * the start of the list up to list_idx at higher MCAM indexes.
+	 */
+ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
+ for (i = 0; i < list_idx; i++) {
+ tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
+ if (!tmp)
+ return -ENOMEM;
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry = flow_cfg->flow_ent[mcam_idx];
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ mcam_idx++;
+ }
+
+ return mcam_idx;
+}
+
+static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node,
+ bool add_req)
+{
+ if (add_req)
+ return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
+
+ return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
+}
+
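
Because lower MCAM indexes match first, the priority-sorted flow list has to map onto flow_ent[] in order. A worked illustration of the add path above, assuming max_flows = 8:

/* Hedged illustration (entries fill flow_ent[] from the bottom up):
 *
 *   add prio 10 -> flow_ent[7]
 *   add prio 20 -> prio 10 shifts to flow_ent[6], prio 20 -> flow_ent[7]
 *   add prio 30 -> prio 10 -> flow_ent[5], prio 20 -> flow_ent[6],
 *                  prio 30 -> flow_ent[7]
 *
 * After each shuffle the lowest prio value (highest precedence) sits at
 * the lowest, first-matched MCAM index. Adding prio 15 next would move
 * only the prio 10 rule (to flow_ent[4]) and land at flow_ent[5].
 */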
static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
int err;
- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
tc_flow_cmd->cookie);
@@ -772,16 +950,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
- otx2_del_mcam_flow_entry(nic, flow_node->entry);
-
- WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
- &flow_node->node,
- nic->tc_info.flow_ht_params));
+ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
+ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
-
- clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows--;
-
return 0;
}
@@ -790,15 +962,14 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
{
struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
- int rc, err;
+ int rc, err, mcam_idx;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
- if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
+ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
return -ENOMEM;
@@ -810,6 +981,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
return -ENOMEM;
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
+ new_node->prio = tc_flow_cmd->common.prio;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
@@ -820,12 +992,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
}
/* If a flow exists with the same cookie, delete it */
- old_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
+ mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req) {
@@ -836,11 +1007,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
-
- new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
- flow_cfg->max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
+ req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -850,26 +1018,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- kfree_rcu(new_node, rcu);
goto free_leaf;
}
- mutex_unlock(&nic->mbox.lock);
- /* add new flow to flow-table */
- rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
- nic->tc_info.flow_ht_params);
- if (rc) {
- otx2_del_mcam_flow_entry(nic, req->entry);
- kfree_rcu(new_node, rcu);
- goto free_leaf;
- }
+ mutex_unlock(&nic->mbox.lock);
+ memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
- set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows++;
-
return 0;
free_leaf:
+ otx2_tc_del_from_flow_list(flow_cfg, new_node);
+ kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -896,16 +1056,13 @@ free_leaf:
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct npc_mcam_get_stats_req *req;
struct npc_mcam_get_stats_rsp *rsp;
struct otx2_tc_flow_stats *stats;
struct otx2_tc_flow *flow_node;
int err;
- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_info(nic->netdev, "tc flow not found for cookie %lx",
tc_flow_cmd->cookie);
@@ -1053,12 +1210,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct otx2_nic *nic = cb_priv;
+ bool ntuple;
if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
return -EOPNOTSUPP;
+ ntuple = nic->netdev->features & NETIF_F_NTUPLE;
switch (type) {
case TC_SETUP_CLSFLOWER:
+ if (ntuple) {
+ netdev_warn(nic->netdev,
+ "Can't install TC flower offload rule when NTUPLE is active");
+ return -EOPNOTSUPP;
+ }
+
return otx2_setup_tc_cls_flower(nic, type_data);
case TC_SETUP_CLSMATCHALL:
return otx2_setup_tc_ingress_matchall(nic, type_data);
@@ -1143,18 +1308,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
EXPORT_SYMBOL(otx2_setup_tc);
-static const struct rhashtable_params tc_flow_ht_params = {
- .head_offset = offsetof(struct otx2_tc_flow, node),
- .key_offset = offsetof(struct otx2_tc_flow, cookie),
- .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
- .automatic_shrinking = true,
-};
-
int otx2_init_tc(struct otx2_nic *nic)
{
- struct otx2_tc_info *tc = &nic->tc_info;
- int err;
-
/* Exclude receive queue 0 being used for police action */
set_bit(0, &nic->rq_bmap);
@@ -1164,25 +1319,12 @@ int otx2_init_tc(struct otx2_nic *nic)
return -EINVAL;
}
- err = otx2_tc_alloc_ent_bitmap(nic);
- if (err)
- return err;
-
- tc->flow_ht_params = tc_flow_ht_params;
- err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
- if (err) {
- kfree(tc->tc_entries_bitmap);
- tc->tc_entries_bitmap = NULL;
- }
- return err;
+ return 0;
}
EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
- struct otx2_tc_info *tc = &nic->tc_info;
-
- kfree(tc->tc_entries_bitmap);
- rhashtable_destroy(&tc->flow_table);
+ otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index d3a76c5ccda8..1e77bbf5d22a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -19,6 +19,9 @@
#define OTX2_QOS_CLASS_NONE 0
#define OTX2_QOS_DEFAULT_PRIO 0xF
#define OTX2_QOS_INVALID_SQ 0xFFFF
+#define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF
+#define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0)
+#define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0)
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
@@ -65,11 +68,24 @@ static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
}
}
+static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 weight;
+
+ weight = quantum / pfvf->hw.dwrr_mtu;
+ if (quantum % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
+
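
otx2_qos_quantum_to_dwrr_weight() above is simply a ceiling division of the class quantum by the hardware DWRR MTU; for instance:

/* Hedged example, assuming pfvf->hw.dwrr_mtu == 1500:
 *   quantum 1500 -> weight 1
 *   quantum 1501 -> weight 2
 *   quantum 3000 -> weight 2
 * i.e. equivalent to DIV_ROUND_UP(quantum, pfvf->hw.dwrr_mtu)
 */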
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct nix_txschq_config *cfg,
int *num_regs)
{
+ u32 rr_weight;
+ u32 quantum;
u64 maxrate;
otx2_qos_get_regaddr(node, cfg, *num_regs);
@@ -86,8 +102,17 @@ static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
return;
}
- /* configure priority */
- cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24;
+ /* configure priority/quantum */
+ if (node->is_static) {
+ cfg->regval[*num_regs] =
+ (node->schq - node->parent->prio_anchor) << 24;
+ } else {
+ quantum = node->quantum ?
+ node->quantum : pfvf->tx_max_pktlen;
+ rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
+ rr_weight;
+ }
(*num_regs)++;
/* configure PIR */
@@ -195,9 +220,8 @@ static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);
cfg->regval[0] = (u64)parent->prio_anchor << 32;
- if (parent->level == NIX_TXSCH_LVL_TL1)
- cfg->regval[0] |= (u64)TXSCH_TL1_DFLT_RR_PRIO << 1;
-
+ cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
+ parent->child_dwrr_prio : 0) << 1;
cfg->num_regs++;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -315,9 +339,14 @@ static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
list_for_each_entry(node, &parent->child_list, list) {
otx2_qos_fill_cfg_tl(node, cfg);
- cfg->schq_contig[node->level]++;
otx2_qos_fill_cfg_schq(node, cfg);
}
+
+	/* Assign the required number of transmit scheduler queues under the
+	 * given class
+	 */
+ cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
+ parent->max_static_prio + 1;
}
static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
@@ -378,10 +407,12 @@ otx2_qos_alloc_root(struct otx2_nic *pfvf)
return ERR_PTR(-ENOMEM);
node->parent = NULL;
- if (!is_otx2_vf(pfvf->pcifunc))
+ if (!is_otx2_vf(pfvf->pcifunc)) {
node->level = NIX_TXSCH_LVL_TL1;
- else
+ } else {
node->level = NIX_TXSCH_LVL_TL2;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ }
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
node->classid = OTX2_QOS_ROOT_CLASSID;
@@ -401,9 +432,13 @@ static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
struct otx2_qos_node *tmp_node;
struct list_head *tmp;
+ if (node->prio > parent->max_static_prio)
+ parent->max_static_prio = node->prio;
+
for (tmp = head->next; tmp != head; tmp = tmp->next) {
tmp_node = list_entry(tmp, struct otx2_qos_node, list);
- if (tmp_node->prio == node->prio)
+ if (tmp_node->prio == node->prio &&
+ tmp_node->is_static)
return -EEXIST;
if (tmp_node->prio > node->prio) {
list_add_tail(&node->list, tmp);
@@ -434,6 +469,10 @@ static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
txschq_node->rate = 0;
txschq_node->ceil = 0;
txschq_node->prio = 0;
+ txschq_node->quantum = 0;
+ txschq_node->is_static = true;
+ txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
mutex_lock(&pfvf->qos.qos_lock);
list_add_tail(&txschq_node->list, &node->child_schq_list);
@@ -459,7 +498,7 @@ static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
struct otx2_qos_node *parent,
u16 classid, u32 prio, u64 rate, u64 ceil,
- u16 qid)
+ u32 quantum, u16 qid, bool static_cfg)
{
struct otx2_qos_node *node;
int err;
@@ -476,6 +515,10 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
node->rate = otx2_convert_rate(rate);
node->ceil = otx2_convert_rate(ceil);
node->prio = prio;
+ node->quantum = quantum;
+ node->is_static = static_cfg;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
__set_bit(qid, pfvf->qos.qos_sq_bmap);
@@ -622,12 +665,28 @@ static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
}
pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
out:
mutex_unlock(&mbox->lock);
return rc;
}
+static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
+ struct otx2_qos_cfg *cfg)
+{
+ int lvl, idx, schq;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
+ if (!cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
+ }
+ }
+}
+
static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct otx2_qos_cfg *cfg)
@@ -652,9 +711,11 @@ static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
list_for_each_entry(tmp, &node->child_list, list) {
otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
cnt = cfg->static_node_pos[tmp->level];
- tmp->schq = cfg->schq_contig_list[tmp->level][cnt];
+ tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
+ cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
if (cnt == 0)
- node->prio_anchor = tmp->schq;
+ node->prio_anchor =
+ cfg->schq_contig_list[tmp->level][0];
cfg->static_node_pos[tmp->level]++;
otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
}
@@ -667,9 +728,87 @@ static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
mutex_lock(&pfvf->qos.qos_lock);
otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
+ otx2_qos_free_unused_txschq(pfvf, cfg);
mutex_unlock(&pfvf->qos.qos_lock);
}
+static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *tmp,
+ unsigned long *child_idx_bmap,
+ int child_cnt)
+{
+ int idx;
+
+ if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
+ return;
+
+	/* assign static nodes a 1:1 prio-to-index mapping first, then the remaining nodes */
+ for (idx = 0; idx < child_cnt; idx++) {
+ if (tmp->is_static && tmp->prio == idx &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ } else if (!tmp->is_static && idx >= tmp->prio &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ }
+ }
+}
+
+static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ unsigned long *child_idx_bmap;
+ struct otx2_qos_node *tmp;
+ int child_cnt;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
+
+ /* allocate child index array */
+ child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
+ child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!child_idx_bmap)
+ return -ENOMEM;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ otx2_qos_assign_base_idx_tl(pfvf, tmp);
+
+ /* assign base index of static priority children first */
+ list_for_each_entry(tmp, &node->child_list, list) {
+ if (!tmp->is_static)
+ continue;
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+ }
+
+ /* assign base index of dwrr priority children */
+ list_for_each_entry(tmp, &node->child_list, list)
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+
+ kfree(child_idx_bmap);
+
+ return 0;
+}
+
+static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ int ret = 0;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ ret = otx2_qos_assign_base_idx_tl(pfvf, node);
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
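
The two passes above give static children exactly their prio as txschq index, then slot DWRR children into the first free index at or above the group's prio. A worked illustration:

/* Hedged illustration: static children at prio 0 and 2, plus two DWRR
 * children in a prio-1 group; child_cnt = 2 (dwrr) + 2 (max static
 * prio) + 1 = 5 slots.
 *
 *   static prio 0 -> idx 0   (1:1 mapping, first pass)
 *   static prio 2 -> idx 2
 *   dwrr   prio 1 -> idx 1   (first free index >= 1)
 *   dwrr   prio 1 -> idx 3   (next free index >= 1)
 *
 * idx 4 stays unused and is handed back by otx2_qos_free_unused_txschq().
 */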
static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct otx2_qos_cfg *cfg)
@@ -761,8 +900,10 @@ static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
- schq = cfg->schq_contig_list[lvl][idx];
- otx2_txschq_free_one(pfvf, lvl, schq);
+ if (cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
}
}
}
@@ -838,6 +979,10 @@ static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
if (ret)
return -ENOSPC;
+ ret = otx2_qos_assign_base_idx(pfvf, node);
+ if (ret)
+ return -ENOMEM;
+
if (!(pfvf->netdev->flags & IFF_UP)) {
otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
return 0;
@@ -894,6 +1039,13 @@ static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defc
goto free_root_node;
}
+ /* Update TL1 RR PRIO */
+ if (root->level == NIX_TXSCH_LVL_TL1) {
+ root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
+ netdev_dbg(pfvf->netdev,
+ "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
+ }
+
if (!(pfvf->netdev->flags & IFF_UP) ||
root->level == NIX_TXSCH_LVL_TL1) {
root->schq = new_cfg->schq_list[root->level][0];
@@ -940,37 +1092,126 @@ static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
return 0;
}
+static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ int err = 0;
+
+	/* The max round-robin weight supported by octeontx2 and CN10K
+	 * differs; validate accordingly.
+	 */
+ if (is_dev_otx2(pfvf->pdev))
+ err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
+ else if (rr_weight > CN10K_MAX_RR_WEIGHT)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
+ struct netlink_ext_ack *extack,
+ struct otx2_nic *pfvf,
+ u64 prio, u64 quantum)
+{
+ int err;
+
+ err = otx2_qos_validate_quantum(pfvf, quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
+ return err;
+ }
+
+ if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = prio;
+ } else if (prio != parent->child_dwrr_prio) {
+ NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
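
otx2_qos_validate_dwrr_cfg() pins the parent's DWRR group to the first quantum-bearing priority it sees; any second group is refused. In effect:

/* Hedged example of the enforced rule:
 *   two children added at prio 3 -> they form the parent's single DWRR
 *                                   group (child_dwrr_prio = 3)
 *   two more children at prio 5 -> rejected with -EOPNOTSUPP
 *                                  ("Only one DWRR group is allowed")
 */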
static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
struct netlink_ext_ack *extack,
struct otx2_nic *pfvf,
- u64 prio)
+ u64 prio, bool static_cfg)
{
- if (test_bit(prio, parent->prio_bmap)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Static priority child with same priority exists");
+ if (prio == parent->child_dwrr_prio && static_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
return -EEXIST;
}
- if (prio == TXSCH_TL1_DFLT_RR_PRIO) {
+ if (static_cfg && test_bit(prio, parent->prio_bmap)) {
NL_SET_ERR_MSG_MOD(extack,
- "Priority is reserved for Round Robin");
- return -EINVAL;
+ "Static priority child with same priority exists");
+ return -EEXIST;
}
return 0;
}
+static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
+{
+ /* For PF, root node dwrr priority is static */
+ if (parent->level == NIX_TXSCH_LVL_TL1)
+ return;
+
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ clear_bit(prio, parent->prio_bmap);
+ }
+}
+
+static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
+ struct otx2_nic *pfvf,
+ u64 prio)
+{
+ struct otx2_qos_node *node;
+ bool ret = false;
+
+ if (parent->child_dwrr_prio == prio)
+ return true;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ list_for_each_entry(node, &parent->child_list, list) {
+ if (prio == node->prio) {
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
+ parent->child_dwrr_prio != prio)
+ continue;
+
+ if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
+ netdev_err(pfvf->netdev,
+ "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
+ node->classid, node->quantum,
+ node->prio);
+ break;
+ }
+ /* mark old node as dwrr */
+ node->is_static = false;
+ parent->child_dwrr_cnt++;
+ parent->child_static_cnt--;
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
u32 parent_classid, u64 rate, u64 ceil,
- u64 prio, struct netlink_ext_ack *extack)
+ u64 prio, u32 quantum,
+ struct netlink_ext_ack *extack)
{
struct otx2_qos_cfg *old_cfg, *new_cfg;
struct otx2_qos_node *node, *parent;
int qid, ret, err;
+ bool static_cfg;
netdev_dbg(pfvf->netdev,
- "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld\n",
- classid, parent_classid, rate, ceil, prio);
+ "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
+ classid, parent_classid, rate, ceil, prio, quantum);
if (prio > OTX2_QOS_MAX_PRIO) {
NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
@@ -978,6 +1219,12 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/* get parent node */
parent = otx2_sw_node_find(pfvf, parent_classid);
if (!parent) {
@@ -991,10 +1238,24 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
goto out;
}
- ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio);
+ static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
+ ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
+ static_cfg);
if (ret)
goto out;
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ parent->child_static_cnt++;
+ else
+ parent->child_dwrr_cnt++;
+
set_bit(prio, parent->prio_bmap);
/* read current txschq configuration */
@@ -1019,7 +1280,7 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
/* allocate and initialize a new child node */
node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
- ceil, qid);
+ ceil, quantum, qid, static_cfg);
if (IS_ERR(node)) {
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
ret = PTR_ERR(node);
@@ -1067,6 +1328,11 @@ free_node:
free_old_cfg:
kfree(old_cfg);
reset_prio:
+ if (static_cfg)
+ parent->child_static_cnt--;
+ else
+ parent->child_dwrr_cnt--;
+
clear_bit(prio, parent->prio_bmap);
out:
return ret;
@@ -1074,10 +1340,11 @@ out:
static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
u16 child_classid, u64 rate, u64 ceil, u64 prio,
- struct netlink_ext_ack *extack)
+ u32 quantum, struct netlink_ext_ack *extack)
{
struct otx2_qos_cfg *old_cfg, *new_cfg;
struct otx2_qos_node *node, *child;
+ bool static_cfg;
int ret, err;
u16 qid;
@@ -1091,6 +1358,12 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/* find node related to classid */
node = otx2_sw_node_find(pfvf, classid);
if (!node) {
@@ -1105,6 +1378,19 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ node->child_static_cnt++;
+ else
+ node->child_dwrr_cnt++;
+
set_bit(prio, node->prio_bmap);
/* store the qid to assign to leaf node */
@@ -1127,7 +1413,8 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
/* allocate and initialize a new child node */
child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
- prio, rate, ceil, qid);
+ prio, rate, ceil, quantum,
+ qid, static_cfg);
if (IS_ERR(child)) {
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
ret = PTR_ERR(child);
@@ -1178,6 +1465,10 @@ free_node:
free_old_cfg:
kfree(old_cfg);
reset_prio:
+ if (static_cfg)
+ node->child_static_cnt--;
+ else
+ node->child_dwrr_cnt--;
clear_bit(prio, node->prio_bmap);
out:
return ret;
@@ -1187,6 +1478,7 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
struct netlink_ext_ack *extack)
{
struct otx2_qos_node *node, *parent;
+ int dwrr_del_node = false;
u64 prio;
u16 qid;
@@ -1202,12 +1494,27 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
prio = node->prio;
qid = node->qid;
+ if (!node->is_static)
+ dwrr_del_node = true;
+
otx2_qos_disable_sq(pfvf, node->qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
- clear_bit(prio, parent->prio_bmap);
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
return 0;
}
@@ -1217,6 +1524,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
{
struct otx2_qos_node *node, *parent;
struct otx2_qos_cfg *new_cfg;
+ int dwrr_del_node = false;
u64 prio;
int err;
u16 qid;
@@ -1241,11 +1549,26 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
return -ENOENT;
}
+ if (!node->is_static)
+ dwrr_del_node = true;
+
/* destroy the leaf node */
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
- clear_bit(prio, parent->prio_bmap);
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
/* create downstream txschq entries to parent */
err = otx2_qos_alloc_txschq_node(pfvf, parent);
@@ -1298,10 +1621,12 @@ void otx2_qos_config_txschq(struct otx2_nic *pfvf)
if (!root)
return;
- err = otx2_qos_txschq_config(pfvf, root);
- if (err) {
- netdev_err(pfvf->netdev, "Error update txschq configuration\n");
- goto root_destroy;
+ if (root->level != NIX_TXSCH_LVL_TL1) {
+ err = otx2_qos_txschq_config(pfvf, root);
+ if (err) {
+			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
+ goto root_destroy;
+ }
}
err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
@@ -1334,7 +1659,8 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
htb->parent_classid,
htb->rate, htb->ceil,
- htb->prio, htb->extack);
+ htb->prio, htb->quantum,
+ htb->extack);
if (res < 0)
return res;
htb->qid = res;
@@ -1343,7 +1669,7 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
htb->classid, htb->rate,
htb->ceil, htb->prio,
- htb->extack);
+ htb->quantum, htb->extack);
case TC_HTB_LEAF_DEL:
return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
case TC_HTB_LEAF_DEL_LAST:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
index 19773284be27..221bd0438f60 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
@@ -35,6 +35,7 @@ struct otx2_qos_cfg {
int dwrr_node_pos[NIX_TXSCH_LVL_CNT];
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
};
struct otx2_qos {
@@ -59,10 +60,18 @@ struct otx2_qos_node {
u64 ceil;
u32 classid;
u32 prio;
- u16 schq; /* hw txschq */
+ u32 quantum;
+ /* hw txschq */
+ u16 schq;
u16 qid;
u16 prio_anchor;
+ u16 max_static_prio;
+ u16 child_dwrr_cnt;
+ u16 child_static_cnt;
+ u16 child_dwrr_prio;
+ u16 txschq_idx; /* txschq allocation index */
u8 level;
+ bool is_static;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 317e447f4991..7c27a19c4d8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -15,10 +15,10 @@
struct mtk_eth_muxc {
const char *name;
int cap_bit;
- int (*set_path)(struct mtk_eth *eth, int path);
+ int (*set_path)(struct mtk_eth *eth, u64 path);
};
-static const char *mtk_eth_path_name(int path)
+static const char *mtk_eth_path_name(u64 path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
@@ -40,10 +40,10 @@ static const char *mtk_eth_path_name(int path)
}
}
-static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
{
bool updated = true;
- u32 val, mask, set;
+ u32 mask, set, reg;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
@@ -59,11 +59,13 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
break;
}
- if (updated) {
- val = mtk_r32(eth, MTK_MAC_MISC);
- val = (val & mask) | set;
- mtk_w32(eth, val, MTK_MAC_MISC);
- }
+ if (mtk_is_netsys_v3_or_greater(eth))
+ reg = MTK_MAC_MISC_V3;
+ else
+ reg = MTK_MAC_MISC;
+
+ if (updated)
+ mtk_m32(eth, mask, set, reg);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
@@ -71,7 +73,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -94,7 +96,7 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
@@ -125,7 +127,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -163,7 +165,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -218,7 +220,7 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
},
};
-static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
{
int i, err = 0;
@@ -249,7 +251,7 @@ out:
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path;
+ u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
@@ -260,7 +262,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path = 0;
+ u64 path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
@@ -274,7 +276,7 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path;
+ u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 2d15342c260a..30e3935e83f9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -152,6 +152,54 @@ static const struct mtk_reg_map mt7986_reg_map = {
.pse_oq_sta = 0x01a0,
};
+static const struct mtk_reg_map mt7988_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6900,
+ .rx_cnt_cfg = 0x6904,
+ .pcrx_ptr = 0x6908,
+ .glo_cfg = 0x6a04,
+ .rst_idx = 0x6a08,
+ .delay_irq = 0x6a0c,
+ .irq_status = 0x6a20,
+ .irq_mask = 0x6a28,
+ .adma_rx_dbg0 = 0x6a38,
+ .int_grp = 0x6a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+};
+
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -179,10 +227,54 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
- "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
- "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
+ "ethif",
+ "sgmiitop",
+ "esw",
+ "gp0",
+ "gp1",
+ "gp2",
+ "gp3",
+ "xgp1",
+ "xgp2",
+ "xgp3",
+ "crypto",
+ "fe",
+ "trgpll",
+ "sgmii_tx250m",
+ "sgmii_rx250m",
+ "sgmii_cdr_ref",
+ "sgmii_cdr_fb",
+ "sgmii2_tx250m",
+ "sgmii2_rx250m",
+ "sgmii2_cdr_ref",
+ "sgmii2_cdr_fb",
+ "sgmii_ck",
+ "eth2pll",
+ "wocpu0",
+ "wocpu1",
+ "netsys0",
+ "netsys1",
+ "ethwarp_wocpu2",
+ "ethwarp_wocpu1",
+ "ethwarp_wocpu0",
+ "top_usxgmii0_sel",
+ "top_usxgmii1_sel",
+ "top_sgm0_sel",
+ "top_sgm1_sel",
+ "top_xfi_phy0_xtal_sel",
+ "top_xfi_phy1_xtal_sel",
+ "top_eth_gmii_sel",
+ "top_eth_refck_50m_sel",
+ "top_eth_sys_200m_sel",
+ "top_eth_sys_sel",
+ "top_eth_xgmii_sel",
+ "top_eth_mii_sel",
+ "top_netsys_sel",
+ "top_netsys_500m_sel",
+ "top_netsys_pao_2x_sel",
+ "top_netsys_sync_250m_sel",
+ "top_netsys_ppefb_250m_sel",
+ "top_netsys_warp_sel",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -195,7 +287,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
u32 val;
@@ -385,10 +477,8 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
}
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
- phy_interface_t interface, int speed)
+ phy_interface_t interface)
{
- unsigned long rate;
- u32 tck, rck, intf;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
@@ -399,30 +489,20 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
return;
}
- if (speed == SPEED_1000) {
- intf = INTF_MODE_RGMII_1000;
- rate = 250000000;
- rck = RCK_CTRL_RGMII_1000;
- tck = TCK_CTRL_RGMII_1000;
- } else {
- intf = INTF_MODE_RGMII_10_100;
- rate = 500000000;
- rck = RCK_CTRL_RGMII_10_100;
- tck = TCK_CTRL_RGMII_10_100;
- }
-
- mtk_w32(eth, intf, INTF_MODE);
-
- regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
- ETHSYS_TRGMII_CLK_SEL362_5,
- ETHSYS_TRGMII_CLK_SEL362_5);
+ dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
+}
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
- if (ret)
- dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+static void mtk_setup_bridge_switch(struct mtk_eth *eth)
+{
+ /* Force Port1 XGMAC Link Up */
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ MTK_XGMAC_STS(MTK_GMAC1_ID));
- mtk_w32(eth, rck, TRGMII_RCK_CTRL);
- mtk_w32(eth, tck, TRGMII_TCK_CTRL);
+ /* Adjust GSW bridge IPG to 11 */
+ mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
+ (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
+ (GSW_IPG_11 << GSWRX_IPG_SHIFT),
+ MTK_GSW_CFG);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
@@ -484,6 +564,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
goto init_err;
}
break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ break;
default:
goto err_phy;
}
@@ -498,17 +580,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- /* FIXME: this is incorrect. Not only does it
- * use state->speed (which is not guaranteed
- * to be correct) but it also makes use of it
- * in a code path that will only be reachable
- * when the PHY interface mode changes, not
- * when the speed changes. Consequently, RGMII
- * is probably broken.
- */
mtk_gmac0_rgmii_adjust(mac->hw,
- state->interface,
- state->speed);
+ state->interface);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -562,6 +635,15 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
return;
}
+ /* Setup gmac */
+ if (mtk_is_netsys_v3_or_greater(eth) &&
+ mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
+ mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
+
+ mtk_setup_bridge_switch(eth);
+ }
+
return;
err_phy:
@@ -602,38 +684,6 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mtk_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
-
- state->link = (pmsr & MAC_MSR_LINK);
- state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
-
- switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
- case 0:
- state->speed = SPEED_10;
- break;
- case MAC_MSR_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MAC_MSR_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
- if (pmsr & MAC_MSR_RX_FC)
- state->pause |= MLO_PAUSE_RX;
- if (pmsr & MAC_MSR_TX_FC)
- state->pause |= MLO_PAUSE_TX;
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -659,7 +709,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
if (IS_ENABLED(CONFIG_SOC_MT7621)) {
@@ -756,7 +806,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
- .mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
@@ -807,11 +856,15 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+
/* Configure MDC Divider */
- val = mtk_r32(eth, MTK_PPSC);
- val &= ~PPSC_MDC_CFG;
- val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
- mtk_w32(eth, val, MTK_PPSC);
+ val = FIELD_PREP(PPSC_MDC_CFG, divider);
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ val |= PPSC_MDC_TURBO;
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
@@ -943,17 +996,32 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
- hw_stats->tx_skip +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
- hw_stats->tx_collisions +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
- hw_stats->tx_bytes +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
- stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
+ } else {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+ }
}
u64_stats_update_end(&hw_stats->syncp);
@@ -963,7 +1031,7 @@ static void mtk_stats_update(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->mac[i] || !eth->mac[i]->hw_stats)
continue;
if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
@@ -1037,7 +1105,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1095,7 +1163,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -1257,7 +1325,19 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
- data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ /* set forward port */
+ switch (mac->id) {
+ case MTK_GMAC1_ID:
+ data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC2_ID:
+ data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC3_ID:
+ data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ }
+
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
@@ -1268,6 +1348,8 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM_V2;
+ if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
+ data |= TX_DMA_SPTAG_V3;
}
WRITE_ONCE(desc->txd5, data);
@@ -1286,7 +1368,7 @@ static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
@@ -1333,8 +1415,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, itxd, &txd_info);
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
- MTK_TX_FLAGS_FPORT1;
+ itx_buf->mac_id = mac->id;
setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
@@ -1382,8 +1463,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
- MTK_TX_FLAGS_FPORT1;
+ tx_buf->mac_id = mac->id;
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
txd_info.size, k++);
@@ -1468,7 +1548,7 @@ static int mtk_queue_stopped(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
if (netif_queue_stopped(eth->netdev[i]))
@@ -1482,7 +1562,7 @@ static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
netif_tx_wake_all_queues(eth->netdev[i]);
@@ -1593,7 +1673,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+ return eth->soc->version == 2;
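+	/* page_pool backed RX is only wired up for NETSYS v2 SoCs */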
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1685,7 +1765,7 @@ static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
}
mtk_tx_set_dma_desc(dev, txd, txd_info);
- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->mac_id = mac->id;
tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1935,13 +2015,26 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
/* find out which mac the packet comes from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
- else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
- !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
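+			/* GDM1/GDM2 sport values map 1:1 to mac 0/1; GDM3 maps to mac 2 */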
+ switch (val) {
+ case PSE_GDM1_PORT:
+ case PSE_GDM2_PORT:
+ mac = val - 1;
+ break;
+ case PSE_GDM3_PORT:
+ mac = MTK_GMAC3_ID;
+ break;
+ default:
+ break;
+ }
+ } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+ }
- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+ if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
!eth->netdev[mac]))
goto release_desc;
@@ -2031,7 +2124,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2056,8 +2149,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
- (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
+ if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
+ netdev_uses_dsa(netdev)) {
unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
@@ -2161,7 +2254,6 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
- int mac = 0;
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
@@ -2169,15 +2261,13 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
tx_buf = mtk_desc_to_tx_buf(ring, desc,
eth->soc->txrx.txd_size);
- if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
- mac = 1;
-
if (!tx_buf->data)
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB)
- mtk_poll_tx_done(eth, state, mac, tx_buf->data);
+ mtk_poll_tx_done(eth, state, tx_buf->mac_id,
+ tx_buf->data);
budget--;
}
@@ -2367,7 +2457,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -2420,14 +2510,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
@@ -2556,7 +2646,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2978,7 +3068,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
const struct mtk_soc_data *soc = eth->soc;
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++)
+ for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
@@ -3104,7 +3194,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
@@ -3132,8 +3222,13 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ u32 val;
+
+ if (!eth->netdev[i])
+ continue;
+
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
/* default setup the forward port to send frame to PDMA */
val &= ~0xffff;
@@ -3143,7 +3238,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
+ if (netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -3250,7 +3345,7 @@ static int mtk_open(struct net_device *dev)
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
@@ -3516,7 +3611,7 @@ static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
val = RSTCTRL_PPE0_V2;
} else {
@@ -3528,7 +3623,7 @@ static void mtk_hw_reset(struct mtk_eth *eth)
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
@@ -3554,7 +3649,7 @@ static void mtk_hw_warm_reset(struct mtk_eth *eth)
return;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
else
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
@@ -3724,7 +3819,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3745,15 +3840,15 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
struct net_device *dev = eth->netdev[i];
- mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
- if (dev) {
- struct mtk_mac *mac = netdev_priv(dev);
+ if (!dev)
+ continue;
- mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
- }
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ mtk_set_mcr_max_rx(netdev_priv(dev),
+ dev->mtu + MTK_RX_ETH_HLEN);
}
/* Indicates CDM to parse the MTK special tag from CPU
@@ -3761,7 +3856,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v1(eth)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
@@ -3783,7 +3878,24 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* PSE should not drop port1, port8 and port9 packets */
+ mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+
+ /* Disable GDM1 RX CRC stripping */
+ mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
+
+		/* The PSE GDM3 MIB counters have incorrect hw default values,
+		 * so read-clear them beforehand to keep ethtool from reporting
+		 * wrong MIB values.
+		 */
+ for (i = 0; i < 0x80; i += 0x4)
+ mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
+ } else if (!mtk_is_netsys_v1(eth)) {
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
@@ -3933,7 +4045,7 @@ static void mtk_pending_work(struct work_struct *work)
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
@@ -3949,8 +4061,8 @@ static void mtk_pending_work(struct work_struct *work)
mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!test_bit(i, &restart))
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i] || !test_bit(i, &restart))
continue;
if (mtk_open(eth->netdev[i])) {
@@ -3977,7 +4089,7 @@ static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
free_netdev(eth->netdev[i]);
@@ -3996,7 +4108,7 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
@@ -4298,7 +4410,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
id = be32_to_cpup(_id);
- if (id >= MTK_MAC_COUNT) {
+ if (id >= MTK_MAX_DEVS) {
dev_err(eth->dev, "%d is not a valid mac id\n", id);
return -EINVAL;
}
@@ -4346,7 +4458,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
- mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
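+	/* MIB counters occupy 0x80 bytes per MAC on NETSYS v3, 0x40 before */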
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mac->hw_stats->reg_offset = id * 0x80;
+ else
+ mac->hw_stats->reg_offset = id * 0x40;
/* phylink create */
err = of_get_phy_mode(np, &phy_mode);
@@ -4361,18 +4477,22 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
- /* This driver makes use of state->speed in mac_config */
- mac->phylink_config.legacy_pre_march2020 = true;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
- __set_bit(PHY_INTERFACE_MODE_MII,
- mac->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_GMII,
- mac->phylink_config.supported_interfaces);
+	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
+	 * in its .mac_config method (since state->speed is not valid there).
+	 * Disable support for MII, GMII and RGMII in that case.
+	 */
+ if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
- phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+ }
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -4396,6 +4516,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.supported_interfaces);
}
+ if (mtk_is_netsys_v3_or_greater(mac->hw) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ id == MTK_GMAC1_ID) {
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE |
+ MAC_10000FD;
+ phy_interface_zero(mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+ }
+
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
@@ -4454,7 +4585,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
rtnl_lock();
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
dev = eth->netdev[i];
if (!dev || !(dev->flags & IFF_UP))
@@ -4584,7 +4715,7 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
@@ -4692,9 +4823,8 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 num_ppe;
+ u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
- num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
@@ -4761,7 +4891,7 @@ static int mtk_remove(struct platform_device *pdev)
int i;
/* stop all devices to make sure that dma is properly shut down */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
@@ -4786,6 +4916,7 @@ static const struct mtk_soc_data mt2701_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4802,9 +4933,10 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4822,10 +4954,11 @@ static const struct mtk_soc_data mt7622_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4842,9 +4975,11 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4863,6 +4998,7 @@ static const struct mtk_soc_data mt7629_data = {
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4880,10 +5016,11 @@ static const struct mtk_soc_data mt7981_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
.offload_version = 2,
.hash_offset = 4,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
.has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
@@ -4901,10 +5038,29 @@ static const struct mtk_soc_data mt7986_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
.offload_version = 2,
.hash_offset = 4,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
.has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data mt7988_data = {
+ .reg_map = &mt7988_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7988_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7988_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 3,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
@@ -4921,6 +5077,7 @@ static const struct mtk_soc_data rt5350_data = {
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4932,14 +5089,15 @@ static const struct mtk_soc_data rt5350_data = {
};
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
- { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
- { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
- { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
- { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
- { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
- { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
- { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
+ { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
+ { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
+ { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
+ { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
+ { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
+ { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 707445f6bcb1..80d17729e557 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -33,7 +33,6 @@
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
-#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -118,14 +117,21 @@
#define MTK_CDMP_EG_CTRL 0x404
/* GDM Ingress Control Register */
-#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
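+/* GMAC3 does not follow the 0x1000 stride; its GDM block sits at 0x540 */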
+#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x540 : 0x500 + (_x * 0x1000); })
#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_STRP_CRC BIT(16)
#define MTK_GDMA_TO_PDMA 0x0
#define MTK_GDMA_DROP_ALL 0x7777
+/* GDM Egress Control Register */
+#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x544 : 0x504 + (_x * 0x1000); })
+#define MTK_GDMA_XGDM_SEL BIT(31)
+
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
@@ -288,8 +294,6 @@
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
-#define MTK_STAT_OFFSET 0x40
-
/* QDMA TX NUM */
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8
@@ -302,6 +306,8 @@
#define TX_DMA_CHKSUM_V2 (0x7 << 28)
#define TX_DMA_TSO_V2 BIT(31)
+#define TX_DMA_SPTAG_V3 BIT(27)
+
/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2 8
#define TX_DMA_FPORT_MASK_V2 0xf
@@ -389,7 +395,26 @@
#define PHY_IAC_TIMEOUT HZ
#define MTK_MAC_MISC 0x1000c
+#define MTK_MAC_MISC_V3 0x10010
#define MTK_MUX_TO_ESW BIT(0)
+#define MISC_MDC_TURBO BIT(4)
+
+/* XMAC status registers */
+#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
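+/* the force-link bit sits at bit 31 for GMAC2 and bit 15 otherwise */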
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_USXGMII_PCS_LINK BIT(8)
+#define MTK_XGMAC_RX_FC BIT(5)
+#define MTK_XGMAC_TX_FC BIT(4)
+#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
+#define MTK_XGMAC_LINK_STS BIT(0)
+
+/* GSW bridge registers */
+#define MTK_GSW_CFG (0x10080)
+#define GSWTX_IPG_MASK GENMASK(19, 16)
+#define GSWTX_IPG_SHIFT 16
+#define GSWRX_IPG_MASK GENMASK(3, 0)
+#define GSWRX_IPG_SHIFT 0
+#define GSW_IPG_11 11
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
@@ -635,12 +660,6 @@ enum mtk_tx_flags {
*/
MTK_TX_FLAGS_SINGLE0 = 0x01,
MTK_TX_FLAGS_PAGE0 = 0x02,
-
- /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
- * SKB out instead of looking up through hardware TX descriptor.
- */
- MTK_TX_FLAGS_FPORT0 = 0x04,
- MTK_TX_FLAGS_FPORT1 = 0x08,
};
/* This enum allows us to identify how the clock is defined on the array of the
@@ -653,6 +672,11 @@ enum mtk_clks_map {
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_GP3,
+ MTK_CLK_XGP1,
+ MTK_CLK_XGP2,
+ MTK_CLK_XGP3,
+ MTK_CLK_CRYPTO,
MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
@@ -669,63 +693,145 @@ enum mtk_clks_map {
MTK_CLK_WOCPU1,
MTK_CLK_NETSYS0,
MTK_CLK_NETSYS1,
+ MTK_CLK_ETHWARP_WOCPU2,
+ MTK_CLK_ETHWARP_WOCPU1,
+ MTK_CLK_ETHWARP_WOCPU0,
+ MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
+ MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
+ MTK_CLK_TOP_SGM_0_SEL,
+ MTK_CLK_TOP_SGM_1_SEL,
+ MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
+ MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
+ MTK_CLK_TOP_ETH_GMII_SEL,
+ MTK_CLK_TOP_ETH_REFCK_50M_SEL,
+ MTK_CLK_TOP_ETH_SYS_200M_SEL,
+ MTK_CLK_TOP_ETH_SYS_SEL,
+ MTK_CLK_TOP_ETH_XGMII_SEL,
+ MTK_CLK_TOP_ETH_MII_SEL,
+ MTK_CLK_TOP_NETSYS_SEL,
+ MTK_CLK_TOP_NETSYS_500M_SEL,
+ MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
+ MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
+ MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
+ MTK_CLK_TOP_NETSYS_WARP_SEL,
MTK_CLK_MAX
};
-#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_TRGPLL))
-#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL))
+#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
#define MT7628_CLKS_BITMAP (0)
-#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
-#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_WOCPU0) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK))
-#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB))
+#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK))
+#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
+#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
+ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
+ BIT_ULL(MTK_CLK_CRYPTO) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
+/* PSE Port Definition */
+enum mtk_pse_port {
+ PSE_ADMA_PORT = 0,
+ PSE_GDM1_PORT,
+ PSE_GDM2_PORT,
+ PSE_PPE0_PORT,
+ PSE_PPE1_PORT,
+ PSE_QDMA_TX_PORT,
+ PSE_QDMA_RX_PORT,
+ PSE_DROP_PORT,
+ PSE_WDMA0_PORT,
+ PSE_WDMA1_PORT,
+ PSE_TDMA_PORT,
+ PSE_NONE_PORT,
+ PSE_PPE2_PORT,
+ PSE_WDMA2_PORT,
+ PSE_EIP197_PORT,
+ PSE_GDM3_PORT,
+ PSE_PORT_MAX
+};
+
+/* GMAC Identifier */
+enum mtk_gmac_id {
+ MTK_GMAC1_ID = 0,
+ MTK_GMAC2_ID,
+ MTK_GMAC3_ID,
+ MTK_GMAC_ID_MAX
+};
+
enum mtk_tx_buf_type {
MTK_TYPE_SKB,
MTK_TYPE_XDP_TX,
@@ -744,7 +850,8 @@ struct mtk_tx_buf {
enum mtk_tx_buf_type type;
void *data;
- u32 flags;
+ u16 mac_id;
+ u16 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
DEFINE_DMA_UNMAP_ADDR(dma_addr1);
@@ -820,7 +927,6 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
- MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
MTK_U3_COPHY_V2_BIT,
@@ -843,42 +949,41 @@ enum mkt_eth_capabilities {
};
/* Supported hardware group on SoCs */
-#define MTK_RGMII BIT(MTK_RGMII_BIT)
-#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
-#define MTK_SGMII BIT(MTK_SGMII_BIT)
-#define MTK_ESW BIT(MTK_ESW_BIT)
-#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
-#define MTK_MUX BIT(MTK_MUX_BIT)
-#define MTK_INFRA BIT(MTK_INFRA_BIT)
-#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
-#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
-#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
-#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
-#define MTK_QDMA BIT(MTK_QDMA_BIT)
-#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
-#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
-#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
-#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
+#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
+#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
+#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
+#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
- BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+ BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
- BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
- BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
- BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
- BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
/* Supported path present on SoCs */
-#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
-#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
@@ -934,11 +1039,13 @@ enum mkt_eth_capabilities {
#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1)
+
+#define MT7988_CAPS (MTK_GDM1_ESW | MTK_QDMA | MTK_RSTCTRL_PPE1)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1009,6 +1116,7 @@ struct mtk_reg_map {
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
* @hash_offset Flow table hash offset.
+ * @version SoC version.
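+ * @disable_pll_modes		Disable the PLL-dependent MII/GMII/RGMII modes on gmac0.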
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
@@ -1022,14 +1130,16 @@ struct mtk_reg_map {
struct mtk_soc_data {
const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
- u32 caps;
- u32 required_clks;
+ u64 caps;
+ u64 required_clks;
bool required_pctl;
u8 offload_version;
u8 hash_offset;
+ u8 version;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
+ bool disable_pll_modes;
struct {
u32 txd_size;
u32 rxd_size;
@@ -1042,8 +1152,8 @@ struct mtk_soc_data {
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
+/* currently no SoC has more than 3 macs */
+#define MTK_MAX_DEVS 3
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
@@ -1182,6 +1292,21 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
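+/* NETSYS generation helpers keyed off the per-SoC version field */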
+static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
+{
+ return eth->soc->version == 1;
+}
+
+static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 1;
+}
+
+static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 2;
+}
+
static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
@@ -1192,7 +1317,7 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
return MTK_FOE_IB1_BIND_TIMESTAMP;
@@ -1200,7 +1325,7 @@ static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_PPPOE_V2;
return MTK_FOE_IB1_BIND_PPPOE;
@@ -1208,7 +1333,7 @@ static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
return MTK_FOE_IB1_BIND_VLAN_TAG;
@@ -1216,7 +1341,7 @@ static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
return MTK_FOE_IB1_BIND_VLAN_LAYER;
@@ -1224,7 +1349,7 @@ static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1232,7 +1357,7 @@ static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1240,7 +1365,7 @@ static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_PACKET_TYPE_V2;
return MTK_FOE_IB1_PACKET_TYPE;
@@ -1248,7 +1373,7 @@ static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
@@ -1256,7 +1381,7 @@ static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB2_MULTICAST_V2;
return MTK_FOE_IB2_MULTICAST;
@@ -1267,6 +1392,7 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 9129821f3ab8..2f0e682449ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -208,7 +208,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
memset(entry, 0, sizeof(*entry));
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -272,7 +272,7 @@ int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
u32 val = *ib2;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val &= ~MTK_FOE_IB2_DEST_PORT_V2;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
} else {
@@ -423,7 +423,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
@@ -447,7 +447,7 @@ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
@@ -603,7 +603,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
struct mtk_foe_entry *hwe;
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
timestamp);
@@ -619,7 +619,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
hwe->ib1 = entry->ib1;
if (ppe->accounting) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val = MTK_FOE_IB2_MIB_CNT_V2;
else
val = MTK_FOE_IB2_MIB_CNT;
@@ -979,7 +979,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
@@ -995,7 +995,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
MTK_PPE_MD_TOAP_BYP_CRSN1 |
MTK_PPE_MD_TOAP_BYP_CRSN2 |
@@ -1037,7 +1037,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index e51de31a52ec..fb6bf58172d9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -216,6 +216,9 @@ struct mtk_foe_ipv6_6rd {
struct mtk_foe_mac_info l2;
};
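+/* hardware FOE entry footprint in bytes for each NETSYS generation */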
+#define MTK_FOE_ENTRY_V1_SIZE 80
+#define MTK_FOE_ENTRY_V2_SIZE 96
+
struct mtk_foe_entry {
u32 ib1;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 02eebff02d45..a70a5417c173 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -193,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
pse_port = 8;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 985cff910f30..5f062ecb402c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1091,7 +1091,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
} else {
struct mtk_eth *eth = dev->hw->eth;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
@@ -1907,7 +1907,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1716a83a4d3..24d0c7c46878 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -294,7 +294,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *dqp, *tmp_dqp;
if (port < 1 || port > dev->caps.num_ports)
- return NULL;
+ return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
@@ -375,7 +375,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
if (port < 1 || port > dev->caps.num_ports)
- return NULL;
+ return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 1874c2f0587f..244bc15a42ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -379,9 +379,9 @@ int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_
if (!htb && htb_qopt->command != TC_HTB_CREATE)
return -EINVAL;
- if (htb_qopt->prio) {
+ if (htb_qopt->prio || htb_qopt->quantum) {
NL_SET_ERR_MSG_MOD(htb_qopt->extack,
- "prio parameter is not supported by device with HTB offload enabled.");
+ "prio and quantum parameters are not supported by device with HTB offload enabled.");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 891d39b4bfd4..658b7d8d50c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -354,6 +354,12 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_init_limits(sa_entry, attrs);
mlx5e_ipsec_init_macs(sa_entry, attrs);
+
+ if (x->encap) {
+ attrs->encap = true;
+ attrs->sport = x->encap->encap_sport;
+ attrs->dport = x->encap->encap_dport;
+ }
}
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
@@ -387,8 +393,25 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
if (x->encap) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
- return -EINVAL;
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ return -EINVAL;
+ }
+
+ if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
+ return -EINVAL;
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
+ return -EINVAL;
+ }
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
+ return -EINVAL;
+ }
}
if (!x->aead) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 4e9887171508..7a7047263618 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -94,13 +94,20 @@ struct mlx5_accel_esp_xfrm_attrs {
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
+ u8 encap : 1;
u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
struct mlx5_ipsec_lft lft;
- u8 smac[ETH_ALEN];
- u8 dmac[ETH_ALEN];
+ union {
+ u8 smac[ETH_ALEN];
+ __be16 sport;
+ };
+ union {
+ u8 dmac[ETH_ALEN];
+ __be16 dport;
+ };
};
enum mlx5_ipsec_cap {
@@ -110,6 +117,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_ROCE = 1 << 3,
MLX5_IPSEC_CAP_PRIO = 1 << 4,
MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
+ MLX5_IPSEC_CAP_ESPINUDP = 1 << 6,
};
struct mlx5e_priv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index dbe87bf89c0d..47baf983147f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -951,37 +951,70 @@ free_reformatbf:
return -EINVAL;
}
+static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
+ return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ case XFRM_DEV_OFFLOAD_OUT:
+ if (attrs->family == AF_INET) {
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+ }
+
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+ default:
+ WARN_ON(true);
+ }
+
+ return -EINVAL;
+}
+
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5_pkt_reformat_params *reformat_params)
{
- u8 *reformatbf;
+ struct udphdr *udphdr;
+ char *reformatbf;
+ size_t bfflen;
__be32 spi;
+ void *hdr;
+
+ reformat_params->type = get_reformat_type(attrs);
+ if (reformat_params->type < 0)
+ return reformat_params->type;
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
- reformat_params->type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
break;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET)
- reformat_params->type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
- else
- reformat_params->type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
-
- reformatbf = kzalloc(MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE,
- GFP_KERNEL);
+ bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+ if (attrs->encap)
+ bfflen += sizeof(*udphdr);
+
+ reformatbf = kzalloc(bfflen, GFP_KERNEL);
if (!reformatbf)
return -ENOMEM;
+ hdr = reformatbf;
+ if (attrs->encap) {
+ udphdr = (struct udphdr *)reformatbf;
+ udphdr->source = attrs->sport;
+ udphdr->dest = attrs->dport;
+ hdr += sizeof(*udphdr);
+ }
+
/* convert to network format */
spi = htonl(attrs->spi);
- memcpy(reformatbf, &spi, sizeof(spi));
+ memcpy(hdr, &spi, sizeof(spi));
reformat_params->param_0 = attrs->authsize;
- reformat_params->size =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+ reformat_params->size = bfflen;
reformat_params->data = reformatbf;
break;
default:
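/* Resulting reformat buffer layout for the encap case, as built above: a
 * struct udphdr (8 bytes) followed by the 4-byte SPI, with the rest of
 * bfflen left zeroed by kzalloc():
 *
 *   offset:  0        8            12          bfflen
 *            | udphdr | SPI (be32) | zero pad |
 */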
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a3554bde3e07..5ff06263c5bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -54,6 +54,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
reformat_l3_esp_tunnel_to_l2))
caps |= MLX5_IPSEC_CAP_TUNNEL;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_transport_over_udp) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_del_esp_transport_over_udp))
+ caps |= MLX5_IPSEC_CAP_ESPINUDP;
}
if (mlx5_get_roce_state(mdev) &&
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 3ca9fce759ea..71cad6bb6e62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
spectrum_ethtool.o spectrum_policer.o \
- spectrum_pgt.o
+ spectrum_pgt.o spectrum_port_range.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index f0b2963ebac3..7870327d921b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -43,6 +43,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1),
+ MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16),
};
struct mlxsw_afk {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 65a4abadc7db..2eac7582c31a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -36,6 +36,7 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_FDB_MISS,
+ MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
MLXSW_AFK_ELEMENT_MAX,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 0107cbc32fc7..d637c0348fa1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -32,6 +32,7 @@ struct mlxsw_env {
const struct mlxsw_bus_info *bus_info;
u8 max_module_count; /* Maximum number of modules per-slot. */
u8 num_of_slots; /* Including the main board. */
+ u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */
struct mutex line_cards_lock; /* Protects line cards. */
struct mlxsw_env_line_card *line_cards[];
};
@@ -111,7 +112,7 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
if (err)
return err;
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id,
MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
@@ -146,6 +147,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u16 i2c_addr;
@@ -153,11 +155,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
int status;
int err;
- /* MCIA register accepts buffer size <= 48. Page of size 128 should be
- * read by chunks of size 48, 48, 32. Align the size of the last chunk
- * to avoid reading after the end of the page.
- */
- size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
+ size = min_t(u16, size, mlxsw_env->max_eeprom_len);
if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
@@ -188,7 +186,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page, offset, size,
i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
@@ -266,12 +264,12 @@ mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -489,9 +487,9 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
u8 size;
size = min_t(u8, page->length - bytes_read,
- MLXSW_REG_MCIA_EEPROM_SIZE);
+ mlxsw_env->max_eeprom_len);
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -1359,6 +1357,26 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
+static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+{
+ char mcam_pl[MLXSW_REG_MCAM_LEN];
+ bool mcia_128b_supported;
+ int err;
+
+ mlxsw_reg_mcam_pack(mcam_pl,
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
+
+ mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
+
+ return 0;
+}
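/* Worked example: with max_eeprom_len = 48, a 128-byte page is read in
 * chunks of min(128, 48) = 48, min(80, 48) = 48 and min(32, 48) = 32 bytes;
 * with MCIA_128B support (max_eeprom_len = 128) one transaction suffices.
 */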
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_env **p_env)
@@ -1427,10 +1445,15 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
+ err = mlxsw_env_max_module_eeprom_len_query(env);
+ if (err)
+ goto err_eeprom_len_query;
+
env->line_cards[0]->active = true;
return 0;
+err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8165bf31a99a..4b90ae44b476 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2799,6 +2799,78 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
}
+/* PPRR - Policy-Engine Port Range Register
+ * ----------------------------------------
+ * This register is used for configuring port range identification.
+ */
+#define MLXSW_REG_PPRR_ID 0x3008
+#define MLXSW_REG_PPRR_LEN 0x14
+
+MLXSW_REG_DEFINE(pprr, MLXSW_REG_PPRR_ID, MLXSW_REG_PPRR_LEN);
+
+/* reg_pprr_ipv4
+ * Apply port range register to IPv4 packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, ipv4, 0x00, 31, 1);
+
+/* reg_pprr_ipv6
+ * Apply port range register to IPv6 packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, ipv6, 0x00, 30, 1);
+
+/* reg_pprr_src
+ * Apply port range register to source L4 ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, src, 0x00, 29, 1);
+
+/* reg_pprr_dst
+ * Apply port range register to destination L4 ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, dst, 0x00, 28, 1);
+
+/* reg_pprr_tcp
+ * Apply port range register to TCP packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, tcp, 0x00, 27, 1);
+
+/* reg_pprr_udp
+ * Apply port range register to UDP packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, udp, 0x00, 26, 1);
+
+/* reg_pprr_register_index
+ * Index of Port Range Register being accessed.
+ * Range is 0..cap_max_acl_l4_port_range-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pprr, register_index, 0x00, 0, 8);
+
+/* reg_pprr_port_range_min
+ * Minimum port range for comparison.
+ * Match is defined as:
+ * port_range_min <= packet_port <= port_range_max.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, port_range_min, 0x04, 16, 16);
+
+/* reg_pprr_port_range_max
+ * Maximum port range for comparison.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, port_range_max, 0x04, 0, 16);
+
+static inline void mlxsw_reg_pprr_pack(char *payload, u8 register_index)
+{
+ MLXSW_REG_ZERO(pprr, payload);
+ mlxsw_reg_pprr_register_index_set(payload, register_index);
+}
+
/* PPBS - Policy-Engine Policy Based Switching Register
* ----------------------------------------------------
* This register retrieves and sets Policy Based Switching Table entries.
@@ -9568,18 +9640,10 @@ static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind,
*/
#define MLXSW_REG_MCIA_ID 0x9014
-#define MLXSW_REG_MCIA_LEN 0x40
+#define MLXSW_REG_MCIA_LEN 0x94
MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
-/* reg_mcia_l
- * Lock bit. Setting this bit will lock the access to the specific
- * cable. Used for updating a full page in a cable EPROM. Any access
- * other then subsequence writes will fail while the port is locked.
- * Access: RW
- */
-MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
-
/* reg_mcia_module
* Module number.
* Access: Index
@@ -9644,7 +9708,6 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
-#define MLXSW_REG_MCIA_EEPROM_SIZE 48
#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0
@@ -9681,7 +9744,7 @@ enum mlxsw_reg_mcia_eeprom_module_info {
* Bytes to read/write.
* Access: RW
*/
-MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, 128);
/* This is used to access the optional upper pages (1-3) in the QSFP+
* memory map. Page 1 is available on offset 256 through 383, page 2 -
@@ -9692,14 +9755,12 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
- u8 lock, u8 page_number,
- u16 device_addr, u8 size,
+ u8 page_number, u16 device_addr, u8 size,
u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
- mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
mlxsw_reg_mcia_device_address_set(payload, device_addr);
mlxsw_reg_mcia_size_set(payload, size);
@@ -10509,6 +10570,79 @@ static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
}
+/* MCAM - Management Capabilities Mask Register
+ * --------------------------------------------
+ * Reports the device supported management features.
+ */
+#define MLXSW_REG_MCAM_ID 0x907F
+#define MLXSW_REG_MCAM_LEN 0x48
+
+MLXSW_REG_DEFINE(mcam, MLXSW_REG_MCAM_ID, MLXSW_REG_MCAM_LEN);
+
+enum mlxsw_reg_mcam_feature_group {
+ /* Enhanced features. */
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES,
+};
+
+/* reg_mcam_feature_group
+ * Feature list mask index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8);
+
+enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
+ /* If set, MCIA supports 128-byte payloads. Otherwise, 48 bytes. */
+ MLXSW_REG_MCAM_MCIA_128B = 34,
+};
+
+#define MLXSW_REG_BYTES_PER_DWORD 0x4
+
+/* reg_mcam_mng_feature_cap_mask
+ * Supported enhanced features, based on the feature_group index.
+ * When a bit is set, the feature is supported in the device.
+ * Access: RO
+ */
+#define MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(_dw_num, _offset) \
+ MLXSW_ITEM_BIT_ARRAY(reg, mcam, mng_feature_cap_mask_dw##_dw_num, \
+ _offset, MLXSW_REG_BYTES_PER_DWORD, 1)
+
+/* Access to the bits in the field 'mng_feature_cap_mask' differs from the
+ * mask fields in other registers: in most cases bit #0 is the first bit of
+ * the last dword, but in the MCAM register the first dword holds bits
+ * #0-#31 and so on. Access is therefore simpler using a bit array per
+ * dword. Declare each dword of the 'mng_feature_cap_mask' field separately.
+ */
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(0, 0x28);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(1, 0x2C);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(2, 0x30);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(3, 0x34);
+
+static inline void
+mlxsw_reg_mcam_pack(char *payload, enum mlxsw_reg_mcam_feature_group feat_group)
+{
+ MLXSW_REG_ZERO(mcam, payload);
+ mlxsw_reg_mcam_feature_group_set(payload, feat_group);
+}
+
+static inline void
+mlxsw_reg_mcam_unpack(char *payload,
+ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits bit,
+ bool *p_mng_feature_cap_val)
+{
+ int offset = bit % (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE);
+ int dword = bit / (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE);
+ u8 (*getters[])(const char *, u16) = {
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw0_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw1_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw2_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw3_get,
+ };
+
+ if (!WARN_ON_ONCE(dword >= ARRAY_SIZE(getters)))
+ *p_mng_feature_cap_val = getters[dword](payload, offset);
+}
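/* Worked example for MLXSW_REG_MCAM_MCIA_128B (bit 34): dword = 34 / 32 = 1
 * and offset = 34 % 32 = 2, so the capability is fetched via
 * mlxsw_reg_mcam_mng_feature_cap_mask_dw1_get(payload, 2).
 */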
+
/* MPSC - Monitoring Packet Sampling Configuration Register
* --------------------------------------------------------
* MPSC Register is used to configure the Packet Sampling mechanism.
@@ -12819,6 +12953,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pacl),
MLXSW_REG(pagt),
MLXSW_REG(ptar),
+ MLXSW_REG(pprr),
MLXSW_REG(ppbs),
MLXSW_REG(prcr),
MLXSW_REG(pefa),
@@ -12901,10 +13036,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
- MLXSW_REG(mpsc),
MLXSW_REG(mcqi),
MLXSW_REG(mcc),
MLXSW_REG(mcda),
+ MLXSW_REG(mcam),
+ MLXSW_REG(mpsc),
MLXSW_REG(mgpc),
MLXSW_REG(mprs),
MLXSW_REG(mogcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 19ae0d1c74a8..89dd2777ec4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -39,6 +39,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE,
MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
@@ -99,6 +100,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE] = 0x2920,
[MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
[MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
[MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 25a01dafde1b..f0f6af3ec7c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1132,8 +1132,8 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
-static int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid)
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -3188,6 +3188,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_nve_init;
}
+ err = mlxsw_sp_port_range_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
+ goto err_port_range_init;
+ }
+
err = mlxsw_sp_acl_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -3280,6 +3286,8 @@ err_ptp_clock_init:
err_router_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
+ mlxsw_sp_port_range_fini(mlxsw_sp);
+err_port_range_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
@@ -3462,6 +3470,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
}
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
+ mlxsw_sp_port_range_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
@@ -3730,6 +3739,26 @@ static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
&size_params);
}
+static int
+mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
+ return -EIO;
+
+ max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
+ devlink_resource_size_params_init(&size_params, max, max, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "port_range_registers", max,
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
int err;
@@ -3758,8 +3787,13 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rifs_register;
+ err = mlxsw_sp_resources_port_range_register(mlxsw_core);
+ if (err)
+ goto err_resources_port_range_register;
+
return 0;
+err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
@@ -3797,8 +3831,13 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rifs_register;
+ err = mlxsw_sp_resources_port_range_register(mlxsw_core);
+ if (err)
+ goto err_resources_port_range_register;
+
return 0;
+err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
@@ -4298,6 +4337,88 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
+static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *upper_dev;
+ struct net_device *master;
+ struct list_head *iter;
+ int done = 0;
+ int err;
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master && netif_is_bridge_master(master)) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
+ extack);
+ if (err)
+ return err;
+ }
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (master && netif_is_bridge_master(master)) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ upper_dev, master,
+ extack);
+ if (err)
+ goto err_port_bridge_join;
+ }
+
+ ++done;
+ }
+
+ return 0;
+
+err_port_bridge_join:
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (!master || !netif_is_bridge_master(master))
+ continue;
+
+ if (!done--)
+ break;
+
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
+ }
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master && netif_is_bridge_master(master))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
+
+ return err;
+}
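/* Note on the unwind above: 'done' counts the VLAN uppers that joined
 * successfully, so the error path walks the uppers again, re-leaves only
 * the first 'done' of them and stops before the one whose join failed
 * ("if (!done--) break;").
 */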
+
+static void
+mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *upper_dev;
+ struct net_device *master;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (!master)
+ continue;
+
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
+ }
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master)
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
+}
+
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack)
@@ -4322,6 +4443,12 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
+
+ err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
+ extack);
+ if (err)
+ goto err_lag_uppers_bridge_join;
+
err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
if (err)
goto err_col_port_add;
@@ -4342,8 +4469,14 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_router_join;
+ err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
+ if (err)
+ goto err_replay;
+
return 0;
+err_replay:
+ mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
lag->ref_count--;
mlxsw_sp_port->lagged = 0;
@@ -4351,6 +4484,8 @@ err_router_join:
mlxsw_sp_port->local_port);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
+ mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
+err_lag_uppers_bridge_join:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
return err;
@@ -4600,9 +4735,62 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
return true;
}
+static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
+ struct net_device *dev)
+{
+ return upper_dev == netdev_master_upper_dev_get(dev);
+}
+
+static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, void *ptr,
+ bool process_foreign);
+
+static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
+ struct netdev_notifier_changeupper_info info = {
+ .info = {
+ .dev = dev,
+ .extack = extack,
+ },
+ .master = mlxsw_sp_netdev_is_master(upper_dev, dev),
+ .upper_dev = upper_dev,
+ .linking = true,
+
+ /* upper_info is relevant for LAG devices. But we would
+ * only need this if LAG were a valid upper above
+ * another upper (e.g. a bridge that is a member of a
+ * LAG), and that is never a valid configuration. So we
+ * can keep this as NULL.
+ */
+ .upper_info = NULL,
+ };
+
+ err = __mlxsw_sp_netdevice_event(mlxsw_sp,
+ NETDEV_PRECHANGEUPPER,
+ &info, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
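/* Hypothetical topology to illustrate the recursion: enslaving swp1 to
 * bond0 where
 *
 *        br0           br1
 *         |             |
 *       bond0 ------ bond0.100
 *
 * replays synthetic PRECHANGEUPPER events for bond0 -> br0 and
 * bond0 -> bond0.100, then recurses into bond0.100's own uppers, so the
 * usual checks run as if the uppers were being linked now.
 */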
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *dev,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr,
+ bool replay_deslavement)
{
struct netdev_notifier_changeupper_info *info;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -4640,8 +4828,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
- return -EINVAL;
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
+ upper_dev,
+ extack);
+ if (err)
+ return err;
}
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
@@ -4656,11 +4847,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4707,15 +4893,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
upper_dev,
extack);
- else
+ } else {
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
lower_dev,
upper_dev);
+ if (!replay_deslavement)
+ break;
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ lower_dev);
+ }
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
@@ -4724,6 +4915,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ dev);
}
} else if (netif_is_ovs_master(upper_dev)) {
if (info->linking)
@@ -4776,13 +4969,15 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
struct net_device *port_dev,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr,
+ bool replay_deslavement)
{
switch (event) {
case NETDEV_PRECHANGEUPPER:
case NETDEV_CHANGEUPPER:
return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
- event, ptr);
+ event, ptr,
+ replay_deslavement);
case NETDEV_CHANGELOWERSTATE:
return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
ptr);
@@ -4791,6 +4986,30 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
return 0;
}
+/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
+ * to do any per-LAG / per-LAG-upper processing.
+ */
+static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
+ unsigned long event,
+ void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (info->linking)
+ break;
+ if (netif_is_bridge_master(info->upper_dev))
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
+ break;
+ }
+ return 0;
+}
+
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
unsigned long event, void *ptr)
{
@@ -4801,19 +5020,19 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
- ptr);
+ ptr, false);
if (ret)
return ret;
}
}
- return 0;
+ return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
}
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
struct net_device *dev,
unsigned long event, void *ptr,
- u16 vid)
+ u16 vid, bool replay_deslavement)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -4844,27 +5063,30 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
- return -EINVAL;
- }
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
+ upper_dev,
+ extack);
+ if (err)
+ return err;
}
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
upper_dev,
extack);
- else
+ } else {
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ if (!replay_deslavement)
+ break;
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ vlan_dev);
+ }
} else if (netif_is_macvlan(upper_dev)) {
if (!info->linking)
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
@@ -4888,26 +5110,26 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
event, ptr,
- vid);
+ vid, false);
if (ret)
return ret;
}
}
- return 0;
+ return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
}
-static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
struct net_device *br_dev,
unsigned long event, void *ptr,
- u16 vid)
+ u16 vid, bool process_foreign)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
- if (!mlxsw_sp)
+ if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
@@ -4920,13 +5142,6 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EOPNOTSUPP;
}
- if (!info->linking)
- break;
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4940,36 +5155,42 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
return 0;
}
-static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
- unsigned long event, void *ptr)
+static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
- event, ptr, vid);
+ event, ptr, vid,
+ true);
else if (netif_is_lag_master(real_dev))
return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
real_dev, event,
ptr, vid);
else if (netif_is_bridge_master(real_dev))
- return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
- event, ptr, vid);
+ return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
+ real_dev, event,
+ ptr, vid,
+ process_foreign);
return 0;
}
-static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
- unsigned long event, void *ptr)
+static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
u16 proto;
- if (!mlxsw_sp)
+ if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
@@ -4997,11 +5218,6 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
return -EOPNOTSUPP;
}
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -5107,35 +5323,48 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_span_entry *span_entry;
- struct mlxsw_sp *mlxsw_sp;
int err = 0;
- mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
if (event == NETDEV_UNREGISTER) {
span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
if (span_entry)
mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
}
- mlxsw_sp_span_respin(mlxsw_sp);
if (netif_is_vxlan(dev))
err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
- err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
+ process_foreign);
else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
+ process_foreign);
else if (netif_is_macvlan(dev))
err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
+ return err;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ mlxsw_sp_span_respin(mlxsw_sp);
+ err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
+
return notifier_from_errno(err);
}
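/* With this split, mlxsw_sp_span_respin() runs once per real notifier
 * invocation, while the synthetic events replayed through
 * __mlxsw_sp_netdevice_event() (see the uppers validation above) bypass it.
 */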
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 231e364cbb7c..65eaa181e0aa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -69,6 +69,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
MLXSW_SP_RESOURCE_RIFS,
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
};
struct mlxsw_sp_port;
@@ -175,6 +176,7 @@ struct mlxsw_sp {
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
struct mlxsw_sp_policer_core *policer_core;
+ struct mlxsw_sp_port_range_core *pr_core;
struct mlxsw_sp_kvdl *kvdl;
struct mlxsw_sp_nve *nve;
struct notifier_block netdevice_nb;
@@ -698,6 +700,8 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -865,9 +869,13 @@ struct mlxsw_sp_acl_rule_info {
egress_bind_blocker:1,
counter_valid:1,
policer_index_valid:1,
- ipv6_valid:1;
+ ipv6_valid:1,
+ src_port_range_reg_valid:1,
+ dst_port_range_reg_valid:1;
unsigned int counter_index;
u16 policer_index;
+ u8 src_port_range_reg_index;
+ u8 dst_port_range_reg_index;
struct {
u32 prev_val;
enum mlxsw_sp_acl_mangle_field prev_field;
@@ -992,7 +1000,8 @@ void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block);
-void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority);
@@ -1484,4 +1493,18 @@ int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_port_range.c */
+struct mlxsw_sp_port_range {
+ u16 min;
+ u16 max;
+ u8 source:1; /* Source or destination */
+};
+
+int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack,
+ u8 *p_prr_index);
+void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index);
+int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index 3a636f753607..dfcdd37e797b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -90,7 +90,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
err_entry_add:
err_rulei_commit:
err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei);
err_rulei_create:
mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
return err;
@@ -105,7 +105,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
&region->catchall.cchunk,
&region->catchall.centry);
- mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei);
mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0423ac262d89..186161a3459d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -339,10 +339,17 @@ err_afa_block_create:
return ERR_PTR(err);
}
-void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
{
if (rulei->action_created)
mlxsw_afa_block_destroy(rulei->act_block);
+ if (rulei->src_port_range_reg_valid)
+ mlxsw_sp_port_range_reg_put(mlxsw_sp,
+ rulei->src_port_range_reg_index);
+ if (rulei->dst_port_range_reg_valid)
+ mlxsw_sp_port_range_reg_put(mlxsw_sp,
+ rulei->dst_port_range_reg_index);
kfree(rulei);
}
@@ -834,7 +841,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 4dea39f2b304..b7f58605b6c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -31,12 +31,14 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
@@ -205,6 +207,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
};
static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 72917f09e806..8329100479b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -418,6 +418,68 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int
+mlxsw_sp_flower_parse_ports_range(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f, u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ports_range match;
+ u32 key_mask_value = 0;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports_range(rule, &match);
+
+ if (match.mask->tp_min.src) {
+ struct mlxsw_sp_port_range range = {
+ .min = ntohs(match.key->tp_min.src),
+ .max = ntohs(match.key->tp_max.src),
+ .source = true,
+ };
+ u8 prr_index;
+ int err;
+
+ err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
+ f->common.extack, &prr_index);
+ if (err)
+ return err;
+
+ rulei->src_port_range_reg_index = prr_index;
+ rulei->src_port_range_reg_valid = true;
+ key_mask_value |= BIT(prr_index);
+ }
+
+ if (match.mask->tp_min.dst) {
+ struct mlxsw_sp_port_range range = {
+ .min = ntohs(match.key->tp_min.dst),
+ .max = ntohs(match.key->tp_max.dst),
+ };
+ u8 prr_index;
+ int err;
+
+ err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
+ f->common.extack, &prr_index);
+ if (err)
+ return err;
+
+ rulei->dst_port_range_reg_index = prr_index;
+ rulei->dst_port_range_reg_valid = true;
+ key_mask_value |= BIT(prr_index);
+ }
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
+ key_mask_value, key_mask_value);
+
+ return 0;
+}
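/* Example: if the source range is allocated register 3 and the destination
 * range register 0, key_mask_value = BIT(3) | BIT(0) = 0x0009, and the rule
 * matches only packets flagged by both registers (key and mask are the same
 * one-hot value).
 */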
+
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f,
@@ -503,6 +565,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
@@ -604,6 +667,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
+
+ err = mlxsw_sp_flower_parse_ports_range(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
+
err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
new file mode 100644
index 000000000000..2d193de12be6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/bits.h>
+#include <linux/netlink.h>
+#include <linux/refcount.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_port_range_reg {
+ struct mlxsw_sp_port_range range;
+ refcount_t refcount;
+ u32 index;
+};
+
+struct mlxsw_sp_port_range_core {
+ struct xarray prr_xa;
+ struct xa_limit prr_ids;
+ atomic_t prr_count;
+};
+
+static int
+mlxsw_sp_port_range_reg_configure(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range_reg *prr)
+{
+ char pprr_pl[MLXSW_REG_PPRR_LEN];
+
+ /* We do not care whether the packet is IPv4 or IPv6, or TCP or UDP, so
+ * set all four fields.
+ */
+ mlxsw_reg_pprr_pack(pprr_pl, prr->index);
+ mlxsw_reg_pprr_ipv4_set(pprr_pl, true);
+ mlxsw_reg_pprr_ipv6_set(pprr_pl, true);
+ mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source);
+ mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source);
+ mlxsw_reg_pprr_tcp_set(pprr_pl, true);
+ mlxsw_reg_pprr_udp_set(pprr_pl, true);
+ mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min);
+ mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pprr), pprr_pl);
+}
+
+static struct mlxsw_sp_port_range_reg *
+mlxsw_sp_port_range_reg_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+ int err;
+
+ prr = kzalloc(sizeof(*prr), GFP_KERNEL);
+ if (!prr)
+ return ERR_PTR(-ENOMEM);
+
+ prr->range = *range;
+ refcount_set(&prr->refcount, 1);
+
+ err = xa_alloc(&pr_core->prr_xa, &prr->index, prr, pr_core->prr_ids,
+ GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of port range registers");
+ goto err_xa_alloc;
+ }
+
+ err = mlxsw_sp_port_range_reg_configure(mlxsw_sp, prr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to configure port range register");
+ goto err_reg_configure;
+ }
+
+ atomic_inc(&pr_core->prr_count);
+
+ return prr;
+
+err_reg_configure:
+ xa_erase(&pr_core->prr_xa, prr->index);
+err_xa_alloc:
+ kfree(prr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_port_range_reg_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port_range_reg *prr)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+
+ atomic_dec(&pr_core->prr_count);
+ xa_erase(&pr_core->prr_xa, prr->index);
+ kfree(prr);
+}
+
+static struct mlxsw_sp_port_range_reg *
+mlxsw_sp_port_range_reg_find(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+ unsigned long index;
+
+ xa_for_each(&pr_core->prr_xa, index, prr) {
+ if (prr->range.min == range->min &&
+ prr->range.max == range->max &&
+ prr->range.source == range->source)
+ return prr;
+ }
+
+ return NULL;
+}
+
+int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack,
+ u8 *p_prr_index)
+{
+ struct mlxsw_sp_port_range_reg *prr;
+
+ prr = mlxsw_sp_port_range_reg_find(mlxsw_sp, range);
+ if (prr) {
+ refcount_inc(&prr->refcount);
+ *p_prr_index = prr->index;
+ return 0;
+ }
+
+ prr = mlxsw_sp_port_range_reg_create(mlxsw_sp, range, extack);
+ if (IS_ERR(prr))
+ return PTR_ERR(prr);
+
+ *p_prr_index = prr->index;
+
+ return 0;
+}
+
+void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+
+ prr = xa_load(&pr_core->prr_xa, prr_index);
+ if (WARN_ON(!prr))
+ return;
+
+ if (!refcount_dec_and_test(&prr->refcount))
+ return;
+
+ mlxsw_sp_port_range_reg_destroy(mlxsw_sp, prr);
+}
+
+static u64 mlxsw_sp_port_range_reg_occ_get(void *priv)
+{
+ struct mlxsw_sp_port_range_core *pr_core = priv;
+
+ return atomic_read(&pr_core->prr_count);
+}
+
+int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_range_core *pr_core;
+ struct mlxsw_core *core = mlxsw_sp->core;
+ u64 max;
+
+ if (!MLXSW_CORE_RES_VALID(core, ACL_MAX_L4_PORT_RANGE))
+ return -EIO;
+ max = MLXSW_CORE_RES_GET(core, ACL_MAX_L4_PORT_RANGE);
+
+ /* Each port range register is represented using a single bit in the
+ * two-byte "l4_port_range" ACL key element.
+ */
+ WARN_ON(max > BITS_PER_BYTE * sizeof(u16));
+
+ pr_core = kzalloc(sizeof(*mlxsw_sp->pr_core), GFP_KERNEL);
+ if (!pr_core)
+ return -ENOMEM;
+ mlxsw_sp->pr_core = pr_core;
+
+ pr_core->prr_ids.max = max - 1;
+ xa_init_flags(&pr_core->prr_xa, XA_FLAGS_ALLOC);
+
+ devl_resource_occ_get_register(priv_to_devlink(core),
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
+ mlxsw_sp_port_range_reg_occ_get,
+ pr_core);
+
+ return 0;
+}
+
+void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+
+ devl_resource_occ_get_unregister(priv_to_devlink(mlxsw_sp->core),
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS);
+ WARN_ON(!xa_empty(&pr_core->prr_xa));
+ xa_destroy(&pr_core->prr_xa);
+ kfree(pr_core);
+}
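/* Hedged usage illustration (tc syntax from memory, not from this patch):
 *
 *   tc filter add dev swp1 ingress proto ip flower ip_proto tcp \
 *      dst_port 1000-2000 action drop
 *
 * should reach mlxsw_sp_flower_parse_ports_range(), take one register via
 * mlxsw_sp_port_range_reg_get() and match on its bit in L4_PORT_RANGE.
 */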
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index b32adf277a22..57f0faac836c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -139,6 +139,7 @@ struct mlxsw_sp_rif_ops {
struct netlink_ext_ack *extack);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack);
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
@@ -2871,6 +2872,21 @@ static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
return !!mlxsw_sp_port;
}
+static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
+ struct neighbour *n)
+{
+ struct net *net;
+
+ net = neigh_parms_net(n->parms);
+
+ /* Take a reference to ensure the neighbour won't be destructed until we
+ * drop the reference in delayed work.
+ */
+ neigh_clone(n);
+ return mlxsw_sp_router_schedule_work(net, router, n,
+ mlxsw_sp_router_neigh_event_work);
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2878,7 +2894,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
@@ -2902,7 +2917,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_NEIGH_UPDATE:
n = ptr;
- net = neigh_parms_net(n->parms);
if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
return NOTIFY_DONE;
@@ -2910,13 +2924,7 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
if (!mlxsw_sp_dev_lower_is_port(n->dev))
return NOTIFY_DONE;
- /* Take a reference to ensure the neighbour won't be
- * destructed until we drop the reference in delayed
- * work.
- */
- neigh_clone(n);
- return mlxsw_sp_router_schedule_work(net, router, n,
- mlxsw_sp_router_neigh_event_work);
+ return mlxsw_sp_router_schedule_neigh_work(router, n);
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
@@ -2975,6 +2983,52 @@ static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+struct mlxsw_sp_neigh_rif_made_sync {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err;
+};
+
+static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
+{
+ struct mlxsw_sp_neigh_rif_made_sync *rms = data;
+ int rc;
+
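+	/* A failure for an earlier neighbour aborts the rest of the walk. */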
+ if (rms->err)
+ return;
+ if (n->dev != mlxsw_sp_rif_dev(rms->rif))
+ return;
+ rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
+ if (rc != NOTIFY_DONE)
+ rms->err = -ENOMEM;
+}
+
+static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_neigh_rif_made_sync rms = {
+ .mlxsw_sp = mlxsw_sp,
+ .rif = rif,
+ };
+
+ neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+ if (rms.err)
+ goto err_arp;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+#endif
+ if (rms.err)
+ goto err_nd;
+
+ return 0;
+
+err_nd:
+err_arp:
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+ return rms.err;
+}
+
enum mlxsw_sp_nexthop_type {
MLXSW_SP_NEXTHOP_TYPE_ETH,
MLXSW_SP_NEXTHOP_TYPE_IPIP,
@@ -4396,6 +4450,19 @@ err_neigh_init:
return err;
}
+static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ break;
+ }
+
+ return 0;
+}
+
static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
@@ -4524,6 +4591,35 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
}
+static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh, *tmp;
+ unsigned int n = 0;
+ int err;
+
+ list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
+ crif_list_node) {
+ err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
+ if (err)
+ goto err_nexthop_type_rif;
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ n++;
+ }
+
+ return 0;
+
+err_nexthop_type_rif:
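+	/* Unwind only the n nexthops that were successfully initialized. */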
+ list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
+ crif_list_node) {
+ if (!n--)
+ break;
+ mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+ return err;
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -7884,6 +7980,26 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
+static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ goto err_nexthop;
+
+ return 0;
+
+err_nexthop:
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+ return err;
+}
+
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -8300,7 +8416,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->rif_entries = rif_entries;
if (ops->fid_get) {
- fid = ops->fid_get(rif, extack);
+ fid = ops->fid_get(rif, params, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -8321,6 +8437,10 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
+ err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ goto err_rif_made_sync;
+
if (netdev_offload_xstats_enabled(params->dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
err = mlxsw_sp_router_port_l3_stats_enable(rif);
@@ -8335,6 +8455,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
return rif;
err_stats_enable:
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+err_rif_made_sync:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8410,6 +8532,110 @@ out:
mutex_unlock(&mlxsw_sp->router->lock);
}
+static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ struct mlxsw_sp_crif *crif;
+
+ rcu_read_lock();
+ upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
+ rcu_read_unlock();
+
+ if (!upper_dev)
+ return;
+
+ crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
+ if (!crif || !crif->rif)
+ return;
+
+ mlxsw_sp_rif_destroy(crif->rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ int lower_pvid,
+ unsigned long event,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ u16 new_vid, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *old_rif;
+ struct mlxsw_sp_rif *new_rif;
+ struct net_device *upper_dev;
+ u16 old_pvid = 0;
+ u16 new_pvid;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (old_rif) {
+ /* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
+ * gotten a PVID notification.
+ */
+ if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
+ old_rif = NULL;
+ else
+ old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
+ }
+
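+	/* A PVID notification sets the new PVID outright; a non-PVID
+	 * notification for the current PVID clears it. Any other VLAN
+	 * change is irrelevant here.
+	 */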
+ if (is_pvid)
+ new_pvid = new_vid;
+ else if (old_pvid == new_vid)
+ new_pvid = 0;
+ else
+ goto out;
+
+ if (old_pvid == new_pvid)
+ goto out;
+
+ if (new_pvid) {
+ struct mlxsw_sp_rif_params params = {
+ .dev = br_dev,
+ .vid = new_pvid,
+ };
+
+		/* If a VLAN upper with the same VID as the new PVID exists,
+		 * destroy its RIF, if it has one.
+ */
+ mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
+
+ if (mlxsw_sp_dev_addr_list_empty(br_dev))
+ goto out;
+ new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
+ if (IS_ERR(new_rif)) {
+ err = PTR_ERR(new_rif);
+ goto out;
+ }
+
+ if (old_pvid)
+ mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
+ true);
+ } else {
+ mlxsw_sp_rif_destroy(old_rif);
+ }
+
+ if (old_pvid) {
+ rcu_read_lock();
+ upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
+ old_pvid);
+ rcu_read_unlock();
+ if (upper_dev)
+ err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
+ upper_dev,
+ new_pvid,
+ NETDEV_UP, extack);
+ }
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
@@ -8664,21 +8890,24 @@ __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_rif_params params = {
- .dev = l3_dev,
- };
+ struct mlxsw_sp_rif_params params;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_fid *fid;
int err;
+ params = (struct mlxsw_sp_rif_params) {
+ .dev = l3_dev,
+ .vid = vid,
+ };
+
mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif, extack);
+ fid = rif->ops->fid_get(rif, &params, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -8776,10 +9005,11 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
- if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
+ netif_is_lag_port(port_dev)))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -8810,10 +9040,10 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(lag_dev))
+ if (!nomaster && netif_is_bridge_port(lag_dev))
return 0;
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
@@ -8822,6 +9052,7 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
+ int lower_pvid,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -8829,6 +9060,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
.dev = l3_dev,
};
struct mlxsw_sp_rif *rif;
+ int err;
switch (event) {
case NETDEV_UP:
@@ -8840,7 +9072,21 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
return -EOPNOTSUPP;
}
+ err = br_vlan_get_pvid(l3_dev, &params.vid);
+ if (err)
+ return err;
+ if (!params.vid)
+ return 0;
+ } else if (is_vlan_dev(l3_dev)) {
+ params.vid = vlan_dev_vlan_id(l3_dev);
+
+		/* If the VID matches the PVID of the bridge below, the
+ * bridge owns the RIF for this VLAN. Don't do anything.
+ */
+ if ((int)params.vid == lower_pvid)
+ return 0;
}
+
rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
@@ -8856,24 +9102,32 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *vlan_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
+ u16 lower_pvid;
+ int err;
- if (netif_is_bridge_port(vlan_dev))
+ if (!nomaster && netif_is_bridge_port(vlan_dev))
return 0;
- if (mlxsw_sp_port_dev_check(real_dev))
+ if (mlxsw_sp_port_dev_check(real_dev)) {
return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
event, vid, extack);
- else if (netif_is_lag_master(real_dev))
+ } else if (netif_is_lag_master(real_dev)) {
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid, extack);
- else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
+ } else if (netif_is_bridge_master(real_dev) &&
+ br_vlan_enabled(real_dev)) {
+ err = br_vlan_get_pvid(real_dev, &lower_pvid);
+ if (err)
+ return err;
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
+ lower_pvid, event,
extack);
+ }
return 0;
}
@@ -8927,10 +9181,8 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
- if (!rif) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
+ if (!rif)
+ return 0;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
@@ -9000,19 +9252,21 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_inetaddr_port_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
+ extack);
else if (netif_is_lag_master(dev))
- return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
+ extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
- extack);
+ nomaster, extack);
else if (netif_is_macvlan(dev))
return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
extack);
@@ -9039,7 +9293,8 @@ static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
+ err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
+ NULL);
out:
mutex_unlock(&router->lock);
return notifier_from_errno(err);
@@ -9063,7 +9318,8 @@ static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
+ ivi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
@@ -9092,7 +9348,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
out:
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
@@ -9146,7 +9402,8 @@ static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
+ i6vi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
@@ -9466,10 +9723,11 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
extack);
- return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
+ extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -9480,7 +9738,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
@@ -9523,6 +9781,116 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+struct mlxsw_sp_router_replay_inetaddr_up {
+ struct mlxsw_sp *mlxsw_sp;
+ struct netlink_ext_ack *extack;
+ unsigned int done;
+ bool deslavement;
+};
+
+static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
+ bool nomaster = ctx->deslavement;
+ struct mlxsw_sp_crif *crif;
+ int err;
+
+ if (mlxsw_sp_dev_addr_list_empty(dev))
+ return 0;
+
+ crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
+ if (!crif || crif->rif)
+ return 0;
+
+ if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
+ return 0;
+
+ err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
+ nomaster, ctx->extack);
+ if (err)
+ return err;
+
+ ctx->done++;
+ return 0;
+}
+
+static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
+ bool nomaster = ctx->deslavement;
+ struct mlxsw_sp_crif *crif;
+
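+	/* Unwind no more devices than were successfully replayed. */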
+ if (!ctx->done)
+ return 0;
+
+ if (mlxsw_sp_dev_addr_list_empty(dev))
+ return 0;
+
+ crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
+ if (!crif || !crif->rif)
+ return 0;
+
+	/* We are rolling back a NETDEV_UP replay, so check against NETDEV_UP. */
+ if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
+ return 0;
+
+ __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
+ NULL);
+
+ ctx->done--;
+ return 0;
+}
+
+int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up ctx = {
+ .mlxsw_sp = mlxsw_sp,
+ .extack = extack,
+ .deslavement = false,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &ctx,
+ };
+ int err;
+
+ err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
+ if (err)
+ return err;
+
+ err = netdev_walk_all_upper_dev_rcu(upper_dev,
+ mlxsw_sp_router_replay_inetaddr_up,
+ &priv);
+ if (err)
+ goto err_replay_up;
+
+ return 0;
+
+err_replay_up:
+ netdev_walk_all_upper_dev_rcu(upper_dev,
+ mlxsw_sp_router_unreplay_inetaddr_up,
+ &priv);
+ mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
+ return err;
+}
+
+void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up ctx = {
+ .mlxsw_sp = mlxsw_sp,
+ .deslavement = true,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &ctx,
+ };
+
+ mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
+}
+
static int
mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, struct net_device *dev,
@@ -9539,15 +9907,84 @@ mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
dev, extack);
}
+static void
+mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
+ vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack)
{
u16 default_vid = MLXSW_SP_DEFAULT_VID;
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ int done = 0;
+ u16 vid;
+ int err;
- return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port,
- default_vid, lag_dev,
- extack);
+ err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
+ lag_dev, extack);
+ if (err)
+ return err;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
+ upper_dev, extack);
+ if (err)
+ goto err_router_join_dev;
+
+ ++done;
+ }
+
+ return 0;
+
+err_router_join_dev:
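+	/* Detach the VLAN uppers that managed to join before the failure. */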
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+ if (!done--)
+ break;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
+ }
+
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
+ return err;
+}
+
+static void
+__mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ u16 default_vid = MLXSW_SP_DEFAULT_VID;
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ u16 vid;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
+ }
+
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
}
int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -9563,6 +10000,14 @@ int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
+void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+ __mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
+ mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -9608,6 +10053,40 @@ out:
return notifier_from_errno(err);
}
+struct mlxsw_sp_macvlan_replay {
+ struct mlxsw_sp *mlxsw_sp;
+ struct netlink_ext_ack *extack;
+};
+
+static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ const struct mlxsw_sp_macvlan_replay *rms = priv->data;
+ struct netlink_ext_ack *extack = rms->extack;
+ struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
+}
+
+static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_macvlan_replay rms = {
+ .mlxsw_sp = rif->mlxsw_sp,
+ .extack = extack,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &rms,
+ };
+
+ return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
+ mlxsw_sp_macvlan_replay_upper,
+ &priv);
+}
+
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
struct netdev_nested_priv *priv)
{
@@ -9630,7 +10109,6 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
if (!netif_is_macvlan_port(dev))
return 0;
- netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n");
return netdev_walk_all_upper_dev_rcu(dev,
__mlxsw_sp_rif_macvlan_flush, &priv);
}
@@ -9688,6 +10166,10 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_subport_op;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9703,6 +10185,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
@@ -9724,6 +10208,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
@@ -9788,6 +10273,10 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_fid_bc_flood_set;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9803,6 +10292,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
@@ -9836,6 +10327,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
@@ -9869,27 +10361,22 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct net_device *br_dev;
- u16 vid;
- int err;
+
+ if (WARN_ON(!params->vid))
+ return ERR_PTR(-EINVAL);
if (is_vlan_dev(dev)) {
- vid = vlan_dev_vlan_id(dev);
br_dev = vlan_dev_real_dev(dev);
if (WARN_ON(!netif_is_bridge_master(br_dev)))
return ERR_PTR(-EINVAL);
- } else {
- err = br_vlan_get_pvid(dev, &vid);
- if (err < 0 || !vid) {
- NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
- return ERR_PTR(-EINVAL);
- }
}
- return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -9954,6 +10441,10 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
if (err)
goto err_fid_bc_flood_set;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9969,6 +10460,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 9a2669a08480..ed3b628caafe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -171,8 +171,19 @@ int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
+int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ u16 new_vid, bool is_pvid,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack);
+void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev);
+int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev);
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index d88e62bc759f..dffb67c1038e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -384,6 +384,91 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}
+static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj);
+
+struct mlxsw_sp_bridge_port_replay_switchdev_objs {
+ struct net_device *brport_dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int done;
+};
+
+static int
+mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
+ struct netlink_ext_ack *extack = port_obj_info->info.extack;
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
+ int err = 0;
+
+ rso = (void *)port_obj_info->info.ctx;
+
+ if (event != SWITCHDEV_PORT_OBJ_ADD ||
+ dev != rso->brport_dev)
+ goto out;
+
+ /* When a port is joining the bridge through a LAG, there likely are
+ * VLANs configured on that LAG already. The replay will thus attempt to
+ * have the given port-vlans join the corresponding FIDs. But the LAG
+ * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
+	 * memberships, back before CHANGEUPPER was distributed and the
+	 * netdevice master was set. So before propagating the VLAN events
+	 * further, we
+ * first need to kill the corresponding VID at the mlxsw_sp_port.
+ *
+ * Note that this doesn't need to be rolled back on failure -- if the
+ * replay fails, the enslavement is off, and the VIDs would be killed by
+ * LAG anyway as part of its rollback.
+ */
+ if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
+ u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;
+
+ err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
+ if (err)
+ goto out;
+ }
+
+ ++rso->done;
+ err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
+ port_obj_info->obj, extack);
+
+out:
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
+ .notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
+};
+
+static int
+mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
+
+ rso = (void *)port_obj_info->info.ctx;
+
+ if (event != SWITCHDEV_PORT_OBJ_ADD ||
+ dev != rso->brport_dev)
+ return NOTIFY_DONE;
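+	/* Stop once every object replayed earlier has been removed. */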
+ if (!rso->done--)
+ return NOTIFY_STOP;
+
+ mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
+ port_obj_info->obj);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
+ .notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
+};
+
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *brport_dev,
@@ -1479,29 +1564,15 @@ err_port_vlan_set:
}
static int
-mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct switchdev_obj_port_vlan *vlan)
+mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- u16 pvid;
-
- pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
- if (!pvid)
- return 0;
-
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vlan->vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vlan->vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
- }
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- return 0;
+ return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
+ flag_pvid, extack);
}
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1518,8 +1589,8 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0;
if (br_vlan_enabled(orig_dev))
- err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
- orig_dev, vlan);
+ err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
+ vlan, extack);
if (!err)
err = -EOPNOTSUPP;
return err;
@@ -2365,6 +2436,33 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
+ .brport_dev = bridge_port->dev,
+ .mlxsw_sp_port = mlxsw_sp_port,
+ };
+ struct notifier_block *nb;
+ int err;
+
+ nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
+ err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
+ &rso, NULL, nb, extack);
+ if (err)
+ goto err_replay;
+
+ return 0;
+
+err_replay:
+ nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
+ switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
+ &rso, NULL, nb, extack);
+ return err;
+}
+
+static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
@@ -2378,7 +2476,7 @@ mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
- return 0;
+ return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
}
static int
@@ -2550,6 +2648,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = bridge_port->dev;
u16 vid;
+ int err;
vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -2565,8 +2664,20 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
- return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
- extack);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
+ if (err)
+ goto err_replay;
+
+ return 0;
+
+err_replay:
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ return err;
}
static void
@@ -2783,8 +2894,15 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_join;
+ err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
+ if (err)
+ goto err_replay;
+
return 0;
+err_replay:
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
err_port_join:
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
return err;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 8f3f78b68592..3765d3389a9a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -300,8 +300,11 @@ static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
+	/* The hardware spec requires the software client to set wqe_cnt to 0
+	 * for receive queues; the value is not used for send queues.
+ */
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
- queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
+ queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..ac2acc9aca9d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1386,8 +1386,8 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
recv_buf_oob = &rxq->rx_oobs[curr_index];
- err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
- &recv_buf_oob->wqe_inf);
+ err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
+ &recv_buf_oob->wqe_inf);
if (WARN_ON_ONCE(err))
return;
@@ -1657,6 +1657,12 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
mana_process_rx_cqe(rxq, cq, &comp[i]);
}
+ if (comp_read > 0) {
+ struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+
+ mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
+ }
+
if (rxq->xdp_flush)
xdp_do_flush();
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6b1fb5708434..f18c791cf698 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -924,7 +924,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
*/
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
- u32 new_ctrl, update;
+ u32 new_ctrl, new_ctrl_w1, update;
unsigned int r;
int err;
@@ -937,14 +937,29 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ }
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
+ new_ctrl_w1 = nn->dp.ctrl_w1;
+ new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
+ err = nfp_net_reconfig(nn, update);
+ if (err)
+ nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
+ nn->dp.ctrl_w1 = new_ctrl_w1;
+ }
+
for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
@@ -964,11 +979,12 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
*/
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- u32 bufsz, new_ctrl, update = 0;
+ u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
unsigned int r;
int err;
new_ctrl = nn->dp.ctrl;
+ new_ctrl_w1 = nn->dp.ctrl_w1;
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_write_key(nn);
@@ -1001,16 +1017,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
- /* Enable device */
- new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+	/* Enable the device.
+	 * Step 1: Use NFP_NET_CFG_CTRL_FREELIST_EN instead of CTRL_ENABLE if
+	 * FREELIST_EN exists.
+ */
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
+ new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
+ else
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
update |= NFP_NET_CFG_UPDATE_RING;
if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
+ /* Step 2: Send the configuration and write the freelist.
+	 * - The freelist only needs to be written once.
+ */
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
- nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, nn->dp.ctrl_w1);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
err = nfp_net_reconfig(nn, update);
if (err) {
nfp_net_clear_config_and_disable(nn);
@@ -1018,10 +1043,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
}
nn->dp.ctrl = new_ctrl;
+ nn->dp.ctrl_w1 = new_ctrl_w1;
for (r = 0; r < nn->dp.num_rx_rings; r++)
nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
+	/* Step 3: Set NFP_NET_CFG_CTRL_ENABLE and send the configuration.
+ */
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+
+ err = nfp_net_reconfig(nn, update);
+ if (err) {
+ nfp_net_clear_config_and_disable(nn);
+ return err;
+ }
+ nn->dp.ctrl = new_ctrl;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 669b9dccb6a9..3e63f6d6a563 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -268,6 +268,7 @@
#define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
+#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index ab7d217b98b3..d6ce113a4210 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -213,29 +213,18 @@ out:
return ret;
}
-static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void ionic_clear_pci(struct ionic *ionic)
{
- struct device *dev = &pdev->dev;
- struct ionic *ionic;
- int num_vfs;
- int err;
-
- ionic = ionic_devlink_alloc(dev);
- if (!ionic)
- return -ENOMEM;
-
- ionic->pdev = pdev;
- ionic->dev = dev;
- pci_set_drvdata(pdev, ionic);
- mutex_init(&ionic->dev_cmd_lock);
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ pci_disable_device(ionic->pdev);
+}
- /* Query system for DMA addressing limitation for the device. */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
- if (err) {
- dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
- err);
- goto err_out_clear_drvdata;
- }
+static int ionic_setup_one(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+ int err;
ionic_debugfs_add_dev(ionic);
@@ -249,20 +238,19 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_request_regions(pdev, IONIC_DRV_NAME);
if (err) {
dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err);
- goto err_out_pci_disable_device;
+ goto err_out_clear_pci;
}
-
pcie_print_link_status(pdev);
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_release_regions;
+ goto err_out_clear_pci;
/* Configure the device */
err = ionic_setup(ionic);
if (err) {
dev_err(dev, "Cannot setup device: %d, aborting\n", err);
- goto err_out_unmap_bars;
+ goto err_out_clear_pci;
}
pci_set_master(pdev);
@@ -279,24 +267,64 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_teardown;
}
- /* Configure the ports */
+ /* Configure the port */
err = ionic_port_identify(ionic);
if (err) {
dev_err(dev, "Cannot identify port: %d, aborting\n", err);
- goto err_out_reset;
+ goto err_out_teardown;
}
err = ionic_port_init(ionic);
if (err) {
dev_err(dev, "Cannot init port: %d, aborting\n", err);
- goto err_out_reset;
+ goto err_out_teardown;
+ }
+
+ return 0;
+
+err_out_teardown:
+ ionic_dev_teardown(ionic);
+err_out_clear_pci:
+ ionic_clear_pci(ionic);
+err_out_debugfs_del_dev:
+ ionic_debugfs_del_dev(ionic);
+
+ return err;
+}
+
+static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ionic *ionic;
+ int num_vfs;
+ int err;
+
+ ionic = ionic_devlink_alloc(dev);
+ if (!ionic)
+ return -ENOMEM;
+
+ ionic->pdev = pdev;
+ ionic->dev = dev;
+ pci_set_drvdata(pdev, ionic);
+ mutex_init(&ionic->dev_cmd_lock);
+
+ /* Query system for DMA addressing limitation for the device. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
+ err);
+ goto err_out;
}
+ err = ionic_setup_one(ionic);
+ if (err)
+ goto err_out;
+
/* Allocate and init the LIF */
err = ionic_lif_size(ionic);
if (err) {
dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
- goto err_out_port_reset;
+ goto err_out_pci;
}
err = ionic_lif_alloc(ionic);
@@ -347,21 +375,10 @@ err_out_free_lifs:
ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
-err_out_port_reset:
- ionic_port_reset(ionic);
-err_out_reset:
- ionic_reset(ionic);
-err_out_teardown:
+err_out_pci:
ionic_dev_teardown(ionic);
-err_out_unmap_bars:
- ionic_unmap_bars(ionic);
-err_out_pci_release_regions:
- pci_release_regions(pdev);
-err_out_pci_disable_device:
- pci_disable_device(pdev);
-err_out_debugfs_del_dev:
- ionic_debugfs_del_dev(ionic);
-err_out_clear_drvdata:
+ ionic_clear_pci(ionic);
+err_out:
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
@@ -386,20 +403,71 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
- ionic_unmap_bars(ionic);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+ ionic_clear_pci(ionic);
ionic_debugfs_del_dev(ionic);
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
}
+static void ionic_reset_prepare(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+
+ dev_dbg(ionic->dev, "%s: device stopping\n", __func__);
+
+ del_timer_sync(&ionic->watchdog_timer);
+ cancel_work_sync(&lif->deferred.work);
+
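+	/* Tear down the queues under the queue lock before releasing the
+	 * PCI resources for the FLR.
+	 */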
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ ionic_txrx_free(lif);
+ ionic_lif_deinit(lif);
+ ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
+ ionic_dev_teardown(ionic);
+ ionic_clear_pci(ionic);
+ ionic_debugfs_del_dev(ionic);
+}
+
+static void ionic_reset_done(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+ int err;
+
+ err = ionic_setup_one(ionic);
+ if (err)
+ goto err_out;
+
+ ionic_debugfs_add_sizes(ionic);
+ ionic_debugfs_add_lif(ionic->lif);
+
+ err = ionic_restart_lif(lif);
+ if (err)
+ goto err_out;
+
+ mod_timer(&ionic->watchdog_timer, jiffies + 1);
+
+err_out:
+ dev_dbg(ionic->dev, "%s: device recovery %s\n",
+ __func__, err ? "failed" : "done");
+}
+
+static const struct pci_error_handlers ionic_err_handler = {
+ /* FLR handling */
+ .reset_prepare = ionic_reset_prepare,
+ .reset_done = ionic_reset_done,
+};
+
static struct pci_driver ionic_driver = {
.name = IONIC_DRV_NAME,
.id_table = ionic_id_table,
.probe = ionic_probe,
.remove = ionic_remove,
.sriov_configure = ionic_sriov_configure,
+ .err_handler = &ionic_err_handler
};
int ionic_bus_register_driver(void)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 612b0015dc43..adc05f944c14 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -434,7 +434,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
}
}
-static void ionic_qcqs_free(struct ionic_lif *lif)
+void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
struct ionic_qcq *adminqcq;
@@ -1754,7 +1754,7 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_lif_addr_add(netdev_priv(netdev), mac);
}
-static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
+void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
/* Stop and clean the queues before reconfiguration */
netif_device_detach(lif->netdev);
@@ -2009,7 +2009,7 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
}
}
-static void ionic_txrx_free(struct ionic_lif *lif)
+void ionic_txrx_free(struct ionic_lif *lif)
{
unsigned int i;
@@ -3266,27 +3266,11 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
-static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+int ionic_restart_lif(struct ionic_lif *lif)
{
struct ionic *ionic = lif->ionic;
int err;
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- return;
-
- dev_info(ionic->dev, "FW Up: restarting LIFs\n");
-
- ionic_init_devinfo(ionic);
- err = ionic_identify(ionic);
- if (err)
- goto err_out;
- err = ionic_port_identify(ionic);
- if (err)
- goto err_out;
- err = ionic_port_init(ionic);
- if (err)
- goto err_out;
-
mutex_lock(&lif->queue_lock);
if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
@@ -3322,12 +3306,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
- dev_info(ionic->dev, "FW Up: LIFs restarted\n");
-
- /* restore the hardware timestamping queues */
- ionic_lif_hwstamp_replay(lif);
- return;
+ return 0;
err_txrx_free:
ionic_txrx_free(lif);
@@ -3337,6 +3317,46 @@ err_qcqs_free:
ionic_qcqs_free(lif);
err_unlock:
mutex_unlock(&lif->queue_lock);
+
+ return err;
+}
+
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Up: restarting LIFs\n");
+
+ /* This is a little different from what happens at
+	 * probe time because the LIF already exists, so we
+ * just need to reanimate it.
+ */
+ ionic_init_devinfo(ionic);
+ err = ionic_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_init(ionic);
+ if (err)
+ goto err_out;
+
+ err = ionic_restart_lif(lif);
+ if (err)
+ goto err_out;
+
+ dev_info(ionic->dev, "FW Up: LIFs restarted\n");
+
+ /* restore the hardware timestamping queues */
+ ionic_lif_hwstamp_replay(lif);
+
+ return;
+
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index fd2ea670e7d8..457c24195ca6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -325,6 +325,11 @@ void ionic_lif_deinit(struct ionic_lif *lif);
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
+void ionic_stop_queues_reconfig(struct ionic_lif *lif);
+void ionic_txrx_free(struct ionic_lif *lif);
+void ionic_qcqs_free(struct ionic_lif *lif);
+int ionic_restart_lif(struct ionic_lif *lif);
+
int ionic_lif_register(struct ionic_lif *lif);
void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 174dc8908b72..c362bff3cb83 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -552,7 +552,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
/* Get a phy register */
static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
- struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ struct smsc911x_data *pdata = bus->priv;
unsigned long flags;
unsigned int addr;
int i, reg;
@@ -591,7 +591,7 @@ out:
static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
u16 val)
{
- struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ struct smsc911x_data *pdata = bus->priv;
unsigned long flags;
unsigned int addr;
int i, reg;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 71fbb358bb7d..3b26f1d86beb 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -102,7 +102,7 @@ static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
- struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ struct smsc9420_pdata *pd = bus->priv;
unsigned long flags;
u32 addr;
int i, reg = -EIO;
@@ -140,7 +140,7 @@ out:
static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
u16 val)
{
- struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ struct smsc9420_pdata *pd = bus->priv;
unsigned long flags;
u32 addr;
int i, reg = -EIO;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 16e67c18b6f7..57f2137bbe9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,13 +59,25 @@
/* #define FRAME_FILTER_DEBUG */
struct stmmac_txq_stats {
- unsigned long tx_pkt_n;
- unsigned long tx_normal_irq_n;
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_pkt_n;
+ u64 tx_normal_irq_n;
+ u64 napi_poll;
+ u64 tx_clean;
+ u64 tx_set_ic_bit;
+ u64 tx_tso_frames;
+ u64 tx_tso_nfrags;
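+	/* Synchronizes the 64-bit counters above for 32-bit readers. */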
+ struct u64_stats_sync syncp;
};
struct stmmac_rxq_stats {
- unsigned long rx_pkt_n;
- unsigned long rx_normal_irq_n;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_pkt_n;
+ u64 rx_normal_irq_n;
+ u64 napi_poll;
+ struct u64_stats_sync syncp;
};
/* Extra statistic and debug information exposed by ethtool */
@@ -81,6 +93,7 @@ struct stmmac_extra_stats {
unsigned long tx_frame_flushed;
unsigned long tx_payload_error;
unsigned long tx_ip_header_error;
+ unsigned long tx_collision;
/* Receive errors */
unsigned long rx_desc;
unsigned long sa_filter_fail;
@@ -113,14 +126,6 @@ struct stmmac_extra_stats {
/* Tx/Rx IRQ Events */
unsigned long rx_early_irq;
unsigned long threshold;
- unsigned long tx_pkt_n;
- unsigned long rx_pkt_n;
- unsigned long normal_irq_n;
- unsigned long rx_normal_irq_n;
- unsigned long napi_poll;
- unsigned long tx_normal_irq_n;
- unsigned long tx_clean;
- unsigned long tx_set_ic_bit;
unsigned long irq_receive_pmt_irq_n;
/* MMC info */
unsigned long mmc_tx_irq_n;
@@ -190,18 +195,16 @@ struct stmmac_extra_stats {
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
- /* TSO */
- unsigned long tx_tso_frames;
- unsigned long tx_tso_nfrags;
/* EST */
unsigned long mtl_est_cgce;
unsigned long mtl_est_hlbs;
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
- /* per queue statistics */
- struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
- struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ unsigned long rx_dropped;
+ unsigned long rx_errors;
+ unsigned long tx_dropped;
+ unsigned long tx_errors;
};
/* Safety Feature statistics exposed by ethtool */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 9f88530c5e8c..b5efd9c2eac7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -113,7 +113,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
plat_dat->has_gmac4 = 1;
plat_dat->dma_cfg->aal = 1;
- plat_dat->tso_en = 1;
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
plat_dat->pmt = 1;
return 0;
@@ -359,7 +359,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
- data->sph_disable = 1;
+ data->flags |= STMMAC_FLAG_SPH_DISABLE;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index ab9f876b6df7..0ffae785d8bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -326,10 +326,10 @@ static int intel_crosststamp(ktime_t *device,
/* Both internal crosstimestamping and external triggered event
* timestamping cannot be run concurrently.
*/
- if (priv->plat->ext_snapshot_en)
+ if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
return -EBUSY;
- priv->plat->int_snapshot_en = 1;
+ priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
mutex_lock(&priv->aux_ts_lock);
/* Enable Internal snapshot trigger */
@@ -350,7 +350,7 @@ static int intel_crosststamp(ktime_t *device,
break;
default:
mutex_unlock(&priv->aux_ts_lock);
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return -EINVAL;
}
writel(acr_value, ptpaddr + PTP_ACR);
@@ -376,7 +376,7 @@ static int intel_crosststamp(ktime_t *device,
if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
stmmac_cross_ts_isr(priv),
HZ / 100)) {
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return -ETIMEDOUT;
}
@@ -395,7 +395,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
}
@@ -458,8 +458,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac = 0;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
- plat->tso_en = 1;
- plat->sph_disable = 1;
+ plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
/* Multiplying factor to the clk_eee_i clock time
* period to make it closer to 100 ns. This value
@@ -561,7 +560,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
- plat->vlan_fail_q_en = true;
+ plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
@@ -610,7 +609,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ext_snapshot_num = AUX_SNAPSHOT0;
plat->crosststamp = intel_crosststamp;
- plat->int_snapshot_en = 0;
+ plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
@@ -628,7 +627,7 @@ static int ehl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
- plat->use_phy_wol = 1;
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 1;
@@ -954,7 +953,7 @@ static int stmmac_config_single_msi(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
res->wol_irq = res->irq;
- plat->multi_msi_en = 0;
+ plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
__func__);
@@ -1006,7 +1005,7 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
- plat->multi_msi_en = 1;
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 73c1dfa7ecb1..4d877d75642d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -588,7 +588,10 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
int i;
plat->interface = priv_plat->phy_mode;
- plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ if (priv_plat->mac_wol)
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
+ else
+ plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e62940414e54..735525ba8b93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -4,10 +4,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
-#include <linux/property.h>
#include "stmmac.h"
#include "stmmac_platform.h"
@@ -104,7 +104,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
- int phy_mode;
+ phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
unsigned int num_por;
@@ -706,12 +706,13 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
- dev_err(dev, "dt configuration failed\n");
- return PTR_ERR(plat_dat);
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
}
plat_dat->clks_config = ethqos_clks_config;
@@ -720,7 +721,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ethqos->phy_mode = device_get_phy_mode(dev);
+ ret = of_get_phy_mode(np, &ethqos->phy_mode);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get phy mode\n");
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -731,16 +734,17 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
- case -ENODEV:
- return -ENODEV;
default:
+ dev_err(dev, "Unsupported phy mode %s\n",
+ phy_modes(ethqos->phy_mode));
return -EINVAL;
}
ethqos->pdev = pdev;
ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
if (IS_ERR(ethqos->rgmii_base))
- return PTR_ERR(ethqos->rgmii_base);
+ return dev_err_probe(dev, PTR_ERR(ethqos->rgmii_base),
+ "Failed to map rgmii resource\n");
ethqos->mac_base = stmmac_res.addr;
@@ -752,7 +756,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))
- return PTR_ERR(ethqos->link_clk);
+ return dev_err_probe(dev, PTR_ERR(ethqos->link_clk),
+ "Failed to get link_clk\n");
ret = ethqos_clks_config(ethqos, true);
if (ret)
@@ -764,7 +769,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->serdes_phy = devm_phy_optional_get(dev, "serdes");
if (IS_ERR(ethqos->serdes_phy))
- return PTR_ERR(ethqos->serdes_phy);
+ return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
+ "Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
@@ -777,10 +783,12 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (ethqos->has_emac_ge_3)
plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
- plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
- plat_dat->rx_clk_runs_in_lpi = 1;
- plat_dat->has_integrated_pcs = data->has_integrated_pcs;
+ plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
+ if (data->has_integrated_pcs)
+ plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
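
Two probe-path conventions are adopted above: of_get_phy_mode() replaces device_get_phy_mode() so the result lands in a typed phy_interface_t, and every error return goes through dev_err_probe(), which logs the message and returns the code in one expression while staying quiet for -EPROBE_DEFER. Condensed from the hunks above:

	ret = of_get_phy_mode(np, &ethqos->phy_mode);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get phy mode\n");

	ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
	if (IS_ERR(ethqos->link_clk))
		return dev_err_probe(dev, PTR_ERR(ethqos->link_clk),
				     "Failed to get link_clk\n");
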
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 1e714380d125..b607279e8cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -440,8 +440,10 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- u32 v;
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
+ u32 v;
v = readl(ioaddr + EMAC_INT_STA);
@@ -452,7 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- x->tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -474,7 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- x->rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
@@ -1227,7 +1233,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
- plat_dat->has_sun8i = true;
+ plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun8i_dwmac_init;
plat_dat->exit = sun8i_dwmac_exit;
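
The sun8i interrupt handler above is one of several hot paths converted from bumping shared extra-stats counters to per-queue counters guarded by u64_stats_sync, which makes 64-bit counters torn-read-safe on 32-bit machines without taking a lock. A minimal sketch of both sides of the protocol, using only the u64_stats API:

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 pkts;
	struct u64_stats_sync syncp;
};

/* Writer (hot path, e.g. an IRQ handler): cheap bracketed update. */
static void demo_count(struct demo_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->pkts++;
	u64_stats_update_end(&s->syncp);
}

/* Reader (e.g. ethtool): retry until the snapshot is consistent. */
static u64 demo_read(const struct demo_stats *s)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		pkts = s->pkts;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return pkts;
}
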
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index f8367c5b490b..99e2e5a5cd60 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -290,7 +290,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
}
plat->has_xgmac = 1;
- plat->tso_en = 1;
+ plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
plat->bsp_priv = mgbe;
@@ -337,7 +337,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
/* Program SID */
writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
- plat->serdes_up_after_phy_linkup = 1;
+ plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
err = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 1c32b1788f02..dea270f60cc3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -82,29 +82,24 @@ static void dwmac100_dump_dma_regs(struct stmmac_priv *priv,
}
/* The DMA controller has two counters to track the number of missed frames. */
-static void dwmac100_dma_diagnostic_fr(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static void dwmac100_dma_diagnostic_fr(struct stmmac_extra_stats *x,
void __iomem *ioaddr)
{
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
if (unlikely(csr8)) {
if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
x->rx_overflow_cntr += 0x800;
} else {
unsigned int ove_cntr;
ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
x->rx_overflow_cntr += ove_cntr;
}
if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
x->rx_missed_cntr += 0xffff;
} else {
unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
x->rx_missed_cntr += miss_f;
}
}
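
Dropping the net_device_stats updates here loses nothing: the same events keep accumulating in the extra-stats counters, and the new stmmac_get_stats64() added later in this diff copies them into the rtnl stats once per query:

	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
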
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 6a011d8633e8..89a14084c611 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -13,8 +13,7 @@
#include "dwmac4.h"
#include "dwmac4_descs.h"
-static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p,
void __iomem *ioaddr)
{
@@ -40,15 +39,13 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
x->tx_frame_flushed++;
if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
(tdes3 & TDES3_EXCESSIVE_COLLISION)))
- stats->collisions +=
+ x->tx_collision +=
(tdes3 & TDES3_COLLISION_COUNT_MASK)
>> TDES3_COLLISION_COUNT_SHIFT;
@@ -73,8 +70,7 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
return ret;
}
-static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes1 = le32_to_cpu(p->des1);
@@ -93,7 +89,7 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
if (unlikely(rdes3 & RDES3_GIANT_PACKET))
- stats->rx_length_errors++;
+ x->rx_length++;
if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
@@ -103,10 +99,8 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
x->rx_mii++;
- if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ if (unlikely(rdes3 & RDES3_CRC_ERROR))
x->rx_crc_errors++;
- stats->rx_crc_errors++;
- }
if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
x->dribbling_bit++;
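
With the net_device_stats argument removed from the descriptor ops, every caller shrinks by one parameter. The stmmac_main.c hunks further down adopt the new shape:

	status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
	status = stmmac_rx_status(priv, &priv->xstats, p);
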
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 03ceb6a94073..980e5f8a37ec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,6 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -198,18 +200,19 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & DMA_CHAN_STATUS_NIS))
- x->normal_irq_n++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- x->rx_normal_irq_n++;
- x->rxq_stats[chan].rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- x->tx_normal_irq_n++;
- x->txq_stats[chan].tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
+
if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
ret |= handle_tx;
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 0b6f999a8305..aaa09b16b016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac_dma.h"
+#include "stmmac.h"
#define GMAC_HI_REG_AE 0x80000000
@@ -161,6 +162,8 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -208,17 +211,20 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_STATUS_NIS)) {
- x->normal_irq_n++;
if (likely(intr_status & DMA_STATUS_RI)) {
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- x->rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- x->tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 1913385df685..153321fe42c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -165,7 +165,7 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
-#define XGMAC_IDDR GENMASK(15, 8)
+#define XGMAC_IDDR GENMASK(16, 8)
#define XGMAC_IDDR_SHIFT 8
#define XGMAC_IDDR_FNUM 4
#define XGMAC_TT BIT(1)
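
The XGMAC_IDDR widening follows from the larger filter count decoded in the dwxgmac2_dma.c hunk below: with up to 32 L3/L4 filters and XGMAC_IDDR_FNUM = 4, a filter's register address can exceed eight bits. A quick standalone check (the filter_no << XGMAC_IDDR_FNUM addressing is an assumption inferred from the neighbouring defines):

#include <stdio.h>

int main(void)
{
	unsigned int max_iddr = 31u << 4;	/* last of 32 filters, FNUM = 4 */

	printf("0x%x %s in GENMASK(15, 8)\n", max_iddr,
	       max_iddr > 0xff ? "does not fit" : "fits");
	return 0;
}
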
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 13c347ee8be9..fc82862a612c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -8,8 +8,7 @@
#include "common.h"
#include "dwxgmac2.h"
-static int dwxgmac2_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes3 = le32_to_cpu(p->des3);
@@ -23,8 +22,7 @@ static int dwxgmac2_get_tx_status(struct net_device_stats *stats,
return ret;
}
-static int dwxgmac2_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 070bd912580b..b09395f5edcb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,6 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -364,16 +366,16 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
- x->normal_irq_n++;
-
if (likely(intr_status & XGMAC_RI)) {
- x->rx_normal_irq_n++;
- x->rxq_stats[chan].rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- x->tx_normal_irq_n++;
- x->txq_stats[chan].tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
}
@@ -408,6 +410,16 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+ /* If L3L4FNUM < 8, the number of L3L4 filters supported by
+ * XGMAC is equal to L3L4FNUM. For L3L4FNUM >= 8 the filter
+ * count doubles at each step (8, 16, 32, ...). The current
+ * maximum of L3L4FNUM is 10.
+ */
+ if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
+ dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
+ else if (dma_cap->l3l4fnum > 10)
+ dma_cap->l3l4fnum = 32;
+
dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
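
The decode above, restated as a pure function (a sketch; the bounds follow the comment in the hunk):

/* Raw L3L4FNUM 0..7 maps to itself; 8..10 doubles per step (8, 16, 32);
 * anything above 10 clamps to the current maximum of 32 filters. */
static unsigned int l3l4fnum_to_filters(unsigned int raw)
{
	if (raw >= 8 && raw <= 10)
		return 8u << (raw - 8);
	if (raw > 10)
		return 32;
	return raw;
}
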
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index a91d8f13a931..937b7a0466fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -12,8 +12,7 @@
#include "common.h"
#include "descs_com.h"
-static int enh_desc_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
@@ -38,15 +37,13 @@ static int enh_desc_get_tx_status(struct net_device_stats *stats,
if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
(tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
- stats->collisions +=
+ x->tx_collision +=
(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
@@ -117,8 +114,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static void enh_desc_get_ext_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
unsigned int rdes0 = le32_to_cpu(p->basic.des0);
@@ -182,8 +178,7 @@ static void enh_desc_get_ext_status(struct net_device_stats *stats,
}
}
-static int enh_desc_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int enh_desc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes0 = le32_to_cpu(p->des0);
@@ -193,14 +188,14 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- stats->rx_length_errors++;
+ x->rx_length++;
return discard_frame;
}
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
- stats->rx_length_errors++;
+ x->rx_length++;
}
if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
@@ -209,7 +204,7 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
pr_err("\tIPC Csum Error/Giant frame\n");
if (unlikely(rdes0 & RDES0_COLLISION))
- stats->collisions++;
+ x->rx_collision++;
if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
x->rx_watchdog++;
@@ -218,7 +213,6 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc_errors++;
- stats->rx_crc_errors++;
}
ret = discard_frame;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6ee7cf07cfd7..40147ef24963 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -57,8 +57,7 @@ struct stmmac_desc_ops {
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
- int (*tx_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ int (*tx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr);
/* Get the buffer size from the descriptor */
int (*get_tx_len)(struct dma_desc *p);
@@ -67,11 +66,9 @@ struct stmmac_desc_ops {
/* Get the receive frame size */
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
/* Return the reception status looking at the RDES1 */
- int (*rx_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ int (*rx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p);
- void (*rx_extended_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ void (*rx_extended_status)(struct stmmac_extra_stats *x,
struct dma_extended_desc *p);
/* Set tx timestamp enable bit */
void (*enable_tx_timestamp) (struct dma_desc *p);
@@ -191,8 +188,7 @@ struct stmmac_dma_ops {
void (*dma_tx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
int mode, u32 channel, int fifosz, u8 qmode);
/* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -536,6 +532,7 @@ struct stmmac_hwtimestamp {
void (*get_systime) (void __iomem *ioaddr, u64 *systime);
void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
void (*timestamp_interrupt)(struct stmmac_priv *priv);
+ void (*correct_latency)(struct stmmac_priv *priv);
};
#define stmmac_config_hw_tstamping(__priv, __args...) \
@@ -554,6 +551,8 @@ struct stmmac_hwtimestamp {
stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
#define stmmac_timestamp_interrupt(__priv, __args...) \
stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
+#define stmmac_correct_latency(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, correct_latency, __args)
struct stmmac_tx_queue;
struct stmmac_rx_queue;
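
The new correct_latency hook rides the existing stmmac_do_void_callback() plumbing, so call sites stay one-liners such as stmmac_correct_latency(priv, priv). Roughly (the exact macro body lives earlier in hwif.h and is assumed here), the wrapper expands to:

	if (priv->hw->ptp && priv->hw->ptp->correct_latency)
		priv->hw->ptp->correct_latency(priv);
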
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 350e6670a576..68a7cfcb1d8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -12,8 +12,7 @@
#include "common.h"
#include "descs_com.h"
-static int ndesc_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
@@ -31,15 +30,12 @@ static int ndesc_get_tx_status(struct net_device_stats *stats,
if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
x->tx_underflow++;
- stats->tx_fifo_errors++;
}
if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
(tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
@@ -47,7 +43,7 @@ static int ndesc_get_tx_status(struct net_device_stats *stats,
unsigned int collisions;
collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
- stats->collisions += collisions;
+ x->tx_collision += collisions;
}
ret = tx_err;
}
@@ -70,8 +66,7 @@ static int ndesc_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns good_frame because the GMAC device
* is supposed to be able to compute the csum in HW. */
-static int ndesc_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
@@ -81,7 +76,7 @@ static int ndesc_get_rx_status(struct net_device_stats *stats,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- stats->rx_length_errors++;
+ x->rx_length++;
return discard_frame;
}
@@ -96,11 +91,9 @@ static int ndesc_get_rx_status(struct net_device_stats *stats,
x->ipc_csum_error++;
if (unlikely(rdes0 & RDES0_COLLISION)) {
x->rx_collision++;
- stats->collisions++;
}
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc_errors++;
- stats->rx_crc_errors++;
}
ret = discard_frame;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 07ea5ab0a60b..4ce5eaaae513 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -77,6 +77,7 @@ struct stmmac_tx_queue {
dma_addr_t dma_tx_phy;
dma_addr_t tx_tail_addr;
u32 mss;
+ struct stmmac_txq_stats txq_stats;
};
struct stmmac_rx_buffer {
@@ -121,6 +122,7 @@ struct stmmac_rx_queue {
unsigned int len;
unsigned int error;
} state;
+ struct stmmac_rxq_stats rxq_stats;
};
struct stmmac_channel {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 2ae73ab842d4..b7ac7abecdd3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -89,14 +89,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
/* Tx/Rx IRQ Events */
STMMAC_STAT(rx_early_irq),
STMMAC_STAT(threshold),
- STMMAC_STAT(tx_pkt_n),
- STMMAC_STAT(rx_pkt_n),
- STMMAC_STAT(normal_irq_n),
- STMMAC_STAT(rx_normal_irq_n),
- STMMAC_STAT(napi_poll),
- STMMAC_STAT(tx_normal_irq_n),
- STMMAC_STAT(tx_clean),
- STMMAC_STAT(tx_set_ic_bit),
STMMAC_STAT(irq_receive_pmt_irq_n),
/* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
@@ -163,9 +155,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
- /* TSO */
- STMMAC_STAT(tx_tso_frames),
- STMMAC_STAT(tx_tso_nfrags),
/* EST */
STMMAC_STAT(mtl_est_cgce),
STMMAC_STAT(mtl_est_hlbs),
@@ -175,6 +164,23 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+/* Statistics collected per queue; these are summed over all TX or RX
+ * queues, or over both TX and RX queues (napi_poll, normal_irq_n).
+ */
+static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = {
+ "rx_pkt_n",
+ "rx_normal_irq_n",
+ "tx_pkt_n",
+ "tx_normal_irq_n",
+ "tx_clean",
+ "tx_set_ic_bit",
+ "tx_tso_frames",
+ "tx_tso_nfrags",
+ "normal_irq_n",
+ "napi_poll",
+};
+#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string)
+
/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m) \
{ #m, sizeof_field(struct stmmac_counters, m), \
@@ -535,23 +541,44 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
int q, stat;
+ u64 *pos;
char *p;
+ pos = data;
for (q = 0; q < tx_cnt; q++) {
- p = (char *)priv + offsetof(struct stmmac_priv,
- xstats.txq_stats[q].tx_pkt_n);
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q];
+ struct stmmac_txq_stats snapshot;
+
+ data = pos;
+ do {
+ start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
+ snapshot = tx_q->txq_stats;
+ } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+
+ p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ = (*(unsigned long *)p);
- p += sizeof(unsigned long);
+ *data++ += (*(u64 *)p);
+ p += sizeof(u64);
}
}
+
+ pos = data;
for (q = 0; q < rx_cnt; q++) {
- p = (char *)priv + offsetof(struct stmmac_priv,
- xstats.rxq_stats[q].rx_pkt_n);
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q];
+ struct stmmac_rxq_stats snapshot;
+
+ data = pos;
+ do {
+ start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
+ snapshot = rx_q->rxq_stats;
+ } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+
+ p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ = (*(unsigned long *)p);
- p += sizeof(unsigned long);
+ *data++ += (*(u64 *)p);
+ p += sizeof(u64);
}
}
}
@@ -562,8 +589,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u64 napi_poll = 0, normal_irq_n = 0;
+ int i, j = 0, pos, ret;
unsigned long count;
- int i, j = 0, ret;
+ unsigned int start;
if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
@@ -574,8 +603,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
}
/* Update the DMA HW counters for dwmac10/100 */
- ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
- priv->ioaddr);
+ ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr);
if (ret) {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
@@ -606,6 +634,48 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
+
+ pos = j;
+ for (i = 0; i < rx_queues_count; i++) {
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i];
+ struct stmmac_rxq_stats snapshot;
+
+ j = pos;
+ do {
+ start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
+ snapshot = rx_q->rxq_stats;
+ } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+
+ data[j++] += snapshot.rx_pkt_n;
+ data[j++] += snapshot.rx_normal_irq_n;
+ normal_irq_n += snapshot.rx_normal_irq_n;
+ napi_poll += snapshot.napi_poll;
+ }
+
+ pos = j;
+ for (i = 0; i < tx_queues_count; i++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i];
+ struct stmmac_txq_stats snapshot;
+
+ j = pos;
+ do {
+ start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
+ snapshot = tx_q->txq_stats;
+ } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+
+ data[j++] += snapshot.tx_pkt_n;
+ data[j++] += snapshot.tx_normal_irq_n;
+ normal_irq_n += snapshot.tx_normal_irq_n;
+ data[j++] += snapshot.tx_clean;
+ data[j++] += snapshot.tx_set_ic_bit;
+ data[j++] += snapshot.tx_tso_frames;
+ data[j++] += snapshot.tx_tso_nfrags;
+ napi_poll += snapshot.napi_poll;
+ }
+ normal_irq_n += priv->xstats.rx_early_irq;
+ data[j++] = normal_irq_n;
+ data[j++] = napi_poll;
+
stmmac_get_per_qstats(priv, &data[j]);
}
@@ -618,7 +688,7 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- len = STMMAC_STATS_LEN +
+ len = STMMAC_STATS_LEN + STMMAC_QSTATS +
STMMAC_TXQ_STATS * tx_cnt +
STMMAC_RXQ_STATS * rx_cnt;
@@ -691,8 +761,11 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
- memcpy(p, stmmac_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_QSTATS; i++) {
+ memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
stmmac_get_qstats_string(priv, p);
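
Note the pos bookmarks in the hunks above: each queue loop rewinds to the same output slots (j = pos, data = pos) and adds with +=, so the exported values are sums across queues rather than one block per queue. A toy model of the trick:

#include <stdio.h>

int main(void)
{
	unsigned long long data[2] = { 0, 0 };
	unsigned long long per_q[3][2] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };
	int pos = 0, j, q;

	for (q = 0; q < 3; q++) {
		j = pos;			/* rewind to the shared slots */
		data[j++] += per_q[q][0];	/* e.g. rx_pkt_n */
		data[j++] += per_q[q][1];	/* e.g. rx_normal_irq_n */
	}

	printf("%llu %llu\n", data[0], data[1]);	/* prints: 6 60 */
	return 0;
}
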
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 8b50f03056b7..7e0fa024e0ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -60,6 +60,48 @@ static void config_sub_second_increment(void __iomem *ioaddr,
*ssinc = data;
}
+static void correct_latency(struct stmmac_priv *priv)
+{
+ void __iomem *ioaddr = priv->ptpaddr;
+ u32 reg_tsic, reg_tsicsns;
+ u32 reg_tsec, reg_tsecsns;
+ u64 scaled_ns;
+ u32 val;
+
+ /* MAC-internal ingress latency */
+ scaled_ns = readl(ioaddr + PTP_TS_INGR_LAT);
+
+ /* See section 11.7.2.5.3.1 "Ingress Correction" on page 4001 of
+ * i.MX8MP Applications Processor Reference Manual Rev. 1, 06/2021
+ */
+ val = readl(ioaddr + PTP_TCR);
+ if (val & PTP_TCR_TSCTRLSSR)
+ /* nanoseconds field is in decimal format with granularity of 1ns/bit */
+ scaled_ns = ((u64)NSEC_PER_SEC << 16) - scaled_ns;
+ else
+ /* nanoseconds field is in binary format with granularity of ~0.466ns/bit */
+ scaled_ns = ((1ULL << 31) << 16) -
+ DIV_U64_ROUND_CLOSEST(scaled_ns * PSEC_PER_NSEC, 466U);
+
+ reg_tsic = scaled_ns >> 16;
+ reg_tsicsns = scaled_ns & 0xff00;
+
+ /* set bit 31 for two's complement */
+ reg_tsic |= BIT(31);
+
+ writel(reg_tsic, ioaddr + PTP_TS_INGR_CORR_NS);
+ writel(reg_tsicsns, ioaddr + PTP_TS_INGR_CORR_SNS);
+
+ /* MAC-internal egress latency */
+ scaled_ns = readl(ioaddr + PTP_TS_EGR_LAT);
+
+ reg_tsec = scaled_ns >> 16;
+ reg_tsecsns = scaled_ns & 0xff00;
+
+ writel(reg_tsec, ioaddr + PTP_TS_EGR_CORR_NS);
+ writel(reg_tsecsns, ioaddr + PTP_TS_EGR_CORR_SNS);
+}
+
static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
{
u32 value;
@@ -180,7 +222,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
u64 ptp_time;
int i;
- if (priv->plat->int_snapshot_en) {
+ if (priv->plat->flags & STMMAC_FLAG_INT_SNAPSHOT_EN) {
wake_up(&priv->tstamp_busy_wait);
return;
}
@@ -195,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
*/
ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
- if (!priv->plat->ext_snapshot_en)
+ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
return;
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
@@ -221,4 +263,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.get_systime = get_systime,
.get_ptptime = get_ptptime,
.timestamp_interrupt = timestamp_interrupt,
+ .correct_latency = correct_latency,
};
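
For the digital-rollover case, the ingress correction programmed by correct_latency() above is the two's-complement of the latency against one second, in 16-bit fixed point, with the integer nanoseconds going to the CORR_NS register and the sub-nanosecond fraction to bits 15:8 of CORR_SNS. A standalone recomputation with an assumed MAC latency of 120 ns:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lat = 120ULL << 16;	/* 120 ns in 16.16 fixed point */
	uint64_t corr = (1000000000ULL << 16) - lat;
	uint32_t corr_ns = (uint32_t)(corr >> 16) | (1u << 31); /* sign bit */
	uint32_t corr_sns = (uint32_t)(corr & 0xff00);

	printf("CORR_NS = 0x%08x CORR_SNS = 0x%04x\n", corr_ns, corr_sns);
	return 0;
}
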
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4727f7be4f86..19e46e8f626a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -325,7 +325,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_250_300M;
}
- if (priv->plat->has_sun8i) {
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
if (clk_rate > 160000000)
priv->clk_csr = 0x03;
else if (clk_rate > 80000000)
@@ -421,7 +421,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->en_tx_lpi_clockgating);
+ priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
return 0;
}
@@ -909,6 +909,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
+ stmmac_correct_latency(priv, priv);
+
return 0;
}
@@ -991,7 +993,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 old_ctrl, ctrl;
- if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
+ if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup)
priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -1084,7 +1087,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
priv->eee_active =
- phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+ phy_init_eee(phy, !(priv->plat->flags &
+ STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1092,6 +1096,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (priv->dma_cap.fpesel)
stmmac_fpe_link_state_handle(priv, true);
+
+ stmmac_correct_latency(priv, priv);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
@@ -2432,6 +2438,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct dma_desc *tx_desc = NULL;
struct xdp_desc xdp_desc;
bool work_done = true;
+ u32 tx_set_ic_bit = 0;
+ unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2492,7 +2500,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ tx_set_ic_bit++;
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
@@ -2504,6 +2512,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2545,11 +2556,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
+ u32 tx_packets = 0, tx_errors = 0;
+ unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
- priv->xstats.tx_clean++;
-
tx_q->xsk_frames_done = 0;
entry = tx_q->dirty_tx;
@@ -2580,8 +2591,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
else
p = tx_q->dma_tx + entry;
- status = stmmac_tx_status(priv, &priv->dev->stats,
- &priv->xstats, p, priv->ioaddr);
+ status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
break;
@@ -2597,13 +2607,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (likely(!(status & tx_not_ls))) {
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
- priv->dev->stats.tx_errors++;
+ tx_errors++;
if (unlikely(status & tx_err_bump_tc))
stmmac_bump_dma_threshold(priv, queue);
} else {
- priv->dev->stats.tx_packets++;
- priv->xstats.tx_pkt_n++;
- priv->xstats.txq_stats[queue].tx_pkt_n++;
+ tx_packets++;
}
if (skb)
stmmac_get_tx_hwtstamp(priv, p, skb);
@@ -2707,6 +2715,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_packets += tx_packets;
+ tx_q->txq_stats.tx_pkt_n += tx_packets;
+ tx_q->txq_stats.tx_clean++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+
+ priv->xstats.tx_errors += tx_errors;
+
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
/* Combine decisions from TX clean and XSK TX */
@@ -2734,7 +2750,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
- priv->dev->stats.tx_errors++;
+ priv->xstats.tx_errors++;
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
@@ -3710,7 +3726,7 @@ static int stmmac_request_irq(struct net_device *dev)
int ret;
/* Request the IRQ lines */
- if (priv->plat->multi_msi_en)
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
ret = stmmac_request_irq_multi_msi(dev);
else
ret = stmmac_request_irq_single(dev);
@@ -3827,10 +3843,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
@@ -3838,7 +3850,8 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: Serdes powerup failed\n",
@@ -4110,6 +4123,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
+ unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4258,7 +4272,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4274,9 +4287,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
- priv->xstats.tx_tso_frames++;
- priv->xstats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ tx_q->txq_stats.tx_tso_frames++;
+ tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4326,7 +4343,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4352,6 +4369,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
+ unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4480,7 +4498,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4507,7 +4524,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4569,7 +4590,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4770,9 +4791,12 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
+ unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4917,16 +4941,18 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
+ unsigned long flags;
struct sk_buff *skb;
u32 hash;
skb = stmmac_construct_skb_zc(ch, xdp);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ priv->xstats.rx_dropped++;
return;
}
@@ -4945,8 +4971,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n++;
+ rx_q->rxq_stats.rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5023,9 +5051,11 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
+ u32 rx_errors = 0, rx_dropped = 0;
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
+ unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5081,8 +5111,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5104,8 +5133,7 @@ read_again:
break;
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats,
+ stmmac_rx_extended_status(priv, &priv->xstats,
rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
xsk_buff_free(buf->xdp);
@@ -5113,7 +5141,7 @@ read_again:
dirty++;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5161,7 +5189,7 @@ read_again:
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
break;
case STMMAC_XDP_TX:
case STMMAC_XDP_REDIRECT:
@@ -5182,8 +5210,12 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
if (failure || stmmac_rx_dirty(priv, queue) > 0)
@@ -5207,6 +5239,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
+ u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5216,6 +5249,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
+ unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5271,8 +5305,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5289,14 +5322,13 @@ read_again:
prefetch(np);
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats, rx_q->dma_erx + entry);
+ stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5364,7 +5396,7 @@ read_again:
virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
/* Clear skb as it was set as
* status by XDP program.
@@ -5393,7 +5425,7 @@ read_again:
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
count++;
goto drain_data;
}
@@ -5413,7 +5445,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5425,7 +5457,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5453,8 +5485,8 @@ drain_data:
napi_gro_receive(&ch->rx_napi, skb);
skb = NULL;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ rx_packets++;
+ rx_bytes += len;
count++;
}
@@ -5469,8 +5501,14 @@ drain_data:
stmmac_rx_refill(priv, queue);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_packets += rx_packets;
+ rx_q->rxq_stats.rx_bytes += rx_bytes;
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
return count;
}
@@ -5480,10 +5518,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_rx_queue *rx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5502,10 +5545,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
@@ -5527,9 +5575,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
+
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
@@ -5677,7 +5736,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
features &= ~NETIF_F_CSUM_MASK;
/* Disable tso if asked by ethtool */
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
if (features & NETIF_F_TSO)
priv->tso = true;
else
@@ -5798,7 +5857,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
}
/* PCS link status */
- if (priv->hw->pcs && !priv->plat->has_integrated_pcs) {
+ if (priv->hw->pcs &&
+ !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
if (priv->xstats.pcs_link)
netif_carrier_on(priv->dev);
else
@@ -5951,7 +6011,7 @@ static void stmmac_poll_controller(struct net_device *dev)
if (test_bit(STMMAC_DOWN, &priv->state))
return;
- if (priv->plat->multi_msi_en) {
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
@@ -6788,6 +6848,56 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return 0;
}
+static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
+ int q;
+
+ for (q = 0; q < tx_cnt; q++) {
+ struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ tx_packets = txq_stats->tx_packets;
+ tx_bytes = txq_stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ for (q = 0; q < rx_cnt; q++) {
+ struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ rx_packets = rxq_stats->rx_packets;
+ rx_bytes = rxq_stats->rx_bytes;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ }
+
+ stats->rx_dropped = priv->xstats.rx_dropped;
+ stats->rx_errors = priv->xstats.rx_errors;
+ stats->tx_dropped = priv->xstats.tx_dropped;
+ stats->tx_errors = priv->xstats.tx_errors;
+ stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
+ stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
+ stats->rx_length_errors = priv->xstats.rx_length;
+ stats->rx_crc_errors = priv->xstats.rx_crc_errors;
+ stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
+ stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
+}
+
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
@@ -6798,6 +6908,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6855,7 +6966,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
int ret;
/* dwmac-sun8i only works in chain mode */
- if (priv->plat->has_sun8i)
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
chain_mode = 1;
priv->chain_mode = chain_mode;
@@ -6876,7 +6987,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
- !priv->plat->use_phy_wol;
+ !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
@@ -6920,7 +7031,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
- priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q_en =
+ (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
/* Run HW quirks, if any */
@@ -7160,12 +7272,18 @@ int stmmac_dvr_probe(struct device *device,
priv->device = device;
priv->dev = ndev;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
- priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+ priv->plat->dma_cfg->multi_msi_en =
+ (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
@@ -7249,7 +7367,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features |= NETIF_F_HW_TC;
}
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (priv->plat->has_gmac4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
@@ -7257,7 +7375,8 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
+ if (priv->dma_cap.sphen &&
+ !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
@@ -7315,6 +7434,8 @@ int stmmac_dvr_probe(struct device *device,
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ priv->xstats.threshold = tc;
+
/* Initialize RSS */
rxq = priv->plat->rx_queues_to_use;
netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
@@ -7621,7 +7742,8 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);
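
The hunks above are part of a wider conversion that replaces individual bool fields in plat_stmmacenet_data with a single flags word tested via STMMAC_FLAG_* bits. A minimal sketch of the idiom with hypothetical flag names (the real values live in include/linux/stmmac.h):

	#include <linux/bits.h>
	#include <linux/printk.h>

	#define MY_FLAG_TSO_EN      BIT(0)
	#define MY_FLAG_SPH_DISABLE BIT(1)

	struct my_plat_data {
		unsigned int flags;
	};

	static void my_probe(struct my_plat_data *plat, bool dt_says_tso)
	{
		/* Setting a capability replaces plat->tso_en = 1 */
		if (dt_says_tso)
			plat->flags |= MY_FLAG_TSO_EN;

		/* Testing a capability replaces reading a bool field */
		if ((plat->flags & MY_FLAG_TSO_EN) &&
		    !(plat->flags & MY_FLAG_SPH_DISABLE))
			pr_debug("TSO on, split header not disabled\n");
	}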
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644bb54f5f02..352b01678c22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -77,7 +77,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->clk_csr = 5;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 1;
- plat->tso_en = 1;
+ plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
/* Set default value for multicast hash bins */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 231152ee5a32..23d53ea04b24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -466,8 +466,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- plat->en_tx_lpi_clockgating =
- of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -525,7 +525,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
- plat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat->flags |= STMMAC_FLAG_TSO_EN;
}
if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
@@ -538,7 +539,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "snps,dwxgmac")) {
plat->has_xgmac = 1;
plat->pmt = 1;
- plat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat->flags |= STMMAC_FLAG_TSO_EN;
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b4388ca8d211..3d7825cb30bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -192,7 +192,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
case PTP_CLK_REQ_EXTTS:
- priv->plat->ext_snapshot_en = on;
+ if (on)
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ else
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
mutex_lock(&priv->aux_ts_lock);
acr_value = readl(ptpaddr + PTP_ACR);
acr_value &= ~PTP_ACR_MASK;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index bf619295d079..d1fe4b46f162 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -26,6 +26,12 @@
#define PTP_ACR 0x40 /* Auxiliary Control Reg */
#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */
#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */
+#define PTP_TS_INGR_CORR_NS 0x58 /* Ingress timestamp correction nanoseconds */
+#define PTP_TS_EGR_CORR_NS 0x5C /* Egress timestamp correction nanoseconds */
+#define PTP_TS_INGR_CORR_SNS 0x60 /* Ingress timestamp correction subnanoseconds */
+#define PTP_TS_EGR_CORR_SNS 0x64 /* Egress timestamp correction subnanoseconds */
+#define PTP_TS_INGR_LAT 0x68 /* MAC internal Ingress Latency */
+#define PTP_TS_EGR_LAT 0x6c /* MAC internal Egress Latency */
#define PTP_STNSUR_ADDSUB_SHIFT 31
#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 6321178fc814..85dc16faca54 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -432,71 +432,6 @@ out:
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
/**
- * wx_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to EEPROM
- * @length: size of EEPROM to calculate a checksum for
- * Calculates the checksum for some buffer on a specified length. The
- * checksum calculated is returned.
- **/
-static u8 wx_calculate_checksum(u8 *buffer, u32 length)
-{
- u8 sum = 0;
- u32 i;
-
- if (!buffer)
- return 0;
-
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8)(0 - sum);
-}
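
The helper deleted here computes the classic additive checksum used for the host-interface mailbox: the byte that makes the whole message sum to zero modulo 256. A standalone userspace sketch of that property:

	#include <assert.h>
	#include <stdint.h>

	static uint8_t csum8(const uint8_t *buf, uint32_t len)
	{
		uint8_t sum = 0;

		while (len--)
			sum += *buf++;
		return (uint8_t)(0 - sum);	/* two's complement of the sum */
	}

	int main(void)
	{
		uint8_t msg[4] = { 0x12, 0x34, 0x56, 0x00 };

		msg[3] = csum8(msg, 3);
		/* Summing every byte, checksum included, now yields 0. */
		assert((uint8_t)(msg[0] + msg[1] + msg[2] + msg[3]) == 0);
		return 0;
	}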
-
-/**
- * wx_reset_hostif - send reset cmd to fw
- * @wx: pointer to hardware structure
- *
- * Sends reset cmd to firmware through the manageability
- * block.
- **/
-int wx_reset_hostif(struct wx *wx)
-{
- struct wx_hic_reset reset_cmd;
- int ret_val = 0;
- int i;
-
- reset_cmd.hdr.cmd = FW_RESET_CMD;
- reset_cmd.hdr.buf_len = FW_RESET_LEN;
- reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- reset_cmd.lan_id = wx->bus.func;
- reset_cmd.reset_type = (u16)wx->reset_type;
- reset_cmd.hdr.checksum = 0;
- reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
- (FW_CEM_HDR_LEN +
- reset_cmd.hdr.buf_len));
-
- for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
- sizeof(reset_cmd),
- WX_HI_COMMAND_TIMEOUT,
- true);
- if (ret_val != 0)
- continue;
-
- if (reset_cmd.hdr.cmd_or_resp.ret_status ==
- FW_CEM_RESP_STATUS_SUCCESS)
- ret_val = 0;
- else
- ret_val = -EFAULT;
-
- break;
- }
-
- return ret_val;
-}
-EXPORT_SYMBOL(wx_reset_hostif);
-
-/**
* wx_init_eeprom_params - Initialize EEPROM params
* @wx: pointer to hardware structure
*
@@ -1501,7 +1436,7 @@ static void wx_restore_vlan(struct wx *wx)
*
* Configure the Rx unit of the MAC after a reset.
**/
-static void wx_configure_rx(struct wx *wx)
+void wx_configure_rx(struct wx *wx)
{
u32 psrtype, i;
int ret;
@@ -1544,6 +1479,7 @@ static void wx_configure_rx(struct wx *wx)
wx_enable_rx(wx);
wx_enable_sec_rx_path(wx);
}
+EXPORT_SYMBOL(wx_configure_rx);
static void wx_configure_isb(struct wx *wx)
{
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 1f93ca32c921..0b3447bc6f2f 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -14,7 +14,6 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
-int wx_reset_hostif(struct wx *wx);
void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
@@ -25,6 +24,7 @@ void wx_disable_rx(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 29dfb561887d..1de88a33a698 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -160,6 +160,10 @@
#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16))
#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16))
+#define WX_PSR_WKUP_CTL 0x15B80
+/* Wake Up Filter Control Bit */
+#define WX_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */
+
/* vlan tbl */
#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4))
@@ -846,7 +850,7 @@ struct wx {
int duplex;
struct phy_device *phydev;
- bool wol_enabled;
+ bool wol_hw_supported;
bool ncsi_enabled;
bool gpio_ctrl;
raw_spinlock_t gpio_lock;
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 5b25834baf38..ec0e869e9aac 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -6,14 +6,49 @@
#include <linux/netdevice.h>
#include "../libwx/wx_ethtool.h"
+#include "../libwx/wx_type.h"
#include "ngbe_ethtool.h"
+static void ngbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!wx->wol_hw_supported)
+ return;
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ if (wx->wol & WX_PSR_WKUP_CTL_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int ngbe_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct pci_dev *pdev = wx->pdev;
+
+ if (!wx->wol_hw_supported)
+ return -EOPNOTSUPP;
+
+ wx->wol = 0;
+ if (wol->wolopts & WAKE_MAGIC)
+ wx->wol = WX_PSR_WKUP_CTL_MAG;
+ netdev->wol_enabled = !!(wx->wol);
+ wr32(wx, WX_PSR_WKUP_CTL, wx->wol);
+ device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled);
+
+ return 0;
+}
+
static const struct ethtool_ops ngbe_ethtool_ops = {
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset,
+ .get_wol = ngbe_get_wol,
+ .set_wol = ngbe_set_wol,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
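
ngbe_get_wol/ngbe_set_wol service the ETHTOOL_GWOL/ETHTOOL_SWOL requests, so 'ethtool <dev>' and 'ethtool -s <dev> wol g' now work on ports whose EEPROM advertises WoL. A minimal userspace sketch of the query side, assuming an interface named eth0 and omitting error handling:

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&wol;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("supported %#x active %#x\n",
			       wol.supported, wol.wolopts);
		close(fd);
		return 0;
	}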
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index c99a5d3de72e..2b431db6085a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -62,7 +62,7 @@ static void ngbe_init_type_code(struct wx *wx)
em_mac_type_rgmii :
em_mac_type_mdi;
- wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
+ wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;
@@ -440,14 +440,26 @@ static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
+ u32 wufc = wx->wol;
netdev = wx->netdev;
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
- ngbe_down(wx);
+ ngbe_close(netdev);
+ wx_clear_interrupt_scheme(wx);
rtnl_unlock();
+
+ if (wufc) {
+ wx_set_rx_mode(netdev);
+ wx_configure_rx(wx);
+ wr32(wx, NGBE_PSR_WKUP_CTL, wufc);
+ } else {
+ wr32(wx, NGBE_PSR_WKUP_CTL, 0);
+ }
+ pci_wake_from_d3(pdev, !!wufc);
+ *enable_wake = !!wufc;
wx_control_hw(wx, false);
pci_disable_device(pdev);
@@ -621,12 +633,11 @@ static int ngbe_probe(struct pci_dev *pdev,
}
wx->wol = 0;
- if (wx->wol_enabled)
+ if (wx->wol_hw_supported)
wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- wx->wol_enabled = !!(wx->wol);
+ netdev->wol_enabled = !!(wx->wol);
wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
-
device_set_wakeup_enable(&pdev->dev, wx->wol);
/* Save off EEPROM version number and Option Rom version which
@@ -712,11 +723,52 @@ static void ngbe_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ bool wake;
+
+ ngbe_dev_shutdown(pdev, &wake);
+ device_set_wakeup_enable(&pdev->dev, wake);
+
+ return 0;
+}
+
+static int ngbe_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev;
+ struct wx *wx;
+ int err;
+
+ wx = pci_get_drvdata(pdev);
+ netdev = wx->netdev;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ wx_err(wx, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ device_wakeup_disable(&pdev->dev);
+
+ ngbe_reset_hw(wx);
+ rtnl_lock();
+ err = wx_init_interrupt_scheme(wx);
+ if (!err && netif_running(netdev))
+ err = ngbe_open(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
static struct pci_driver ngbe_driver = {
.name = ngbe_driver_name,
.id_table = ngbe_pci_tbl,
.probe = ngbe_probe,
.remove = ngbe_remove,
+ .suspend = ngbe_suspend,
+ .resume = ngbe_resume,
.shutdown = ngbe_shutdown,
};
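
The .suspend/.resume members used here are the legacy PCI power-management hooks; the generic alternative wires a struct dev_pm_ops through the embedded device_driver instead. A minimal sketch of that shape, with hypothetical callbacks:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int my_suspend(struct device *dev) { return 0; }
	static int my_resume(struct device *dev)  { return 0; }

	static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct pci_driver my_driver = {
		.name      = "my_driver",
		.driver.pm = &my_pm_ops,
	};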
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index c9ddbbc3fa4f..cc2f325a52f7 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -236,6 +236,7 @@ static void ngbe_phy_fixup(struct wx *wx)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phydev->mac_managed_pm = true;
if (wx->mac_type != em_mac_type_mdi)
return;
/* disable EEE, internal phy does not support eee */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 0772eb14eabf..6e130d1f7a7b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -257,16 +257,16 @@ static void txgbe_reset_misc(struct wx *wx)
int txgbe_reset_hw(struct wx *wx)
{
int status;
+ u32 val;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = wx_stop_adapter(wx);
if (status != 0)
return status;
- if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
- wx_reset_hostif(wx);
-
+ val = WX_MIS_RST_LAN_RST(wx->bus.func);
+ wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST));
+ WX_WRITE_FLUSH(wx);
usleep_range(10, 100);
status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func));
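
The replacement reset path sets the per-function LAN reset bit directly and flushes the posted write with a read-back before delaying. The flush idiom, sketched generically over a hypothetical register offset:

	#include <linux/delay.h>
	#include <linux/io.h>

	#define MY_RST	0x0000	/* hypothetical reset register offset */

	static void my_lan_reset(void __iomem *base, u32 reset_bit)
	{
		writel(readl(base + MY_RST) | reset_bit, base + MY_RST);
		readl(base + MY_RST);	/* flush the posted write */
		usleep_range(10, 100);
	}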
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index acb20ad4e37e..144ec626230d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -243,7 +243,8 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_tos = ip_sock_rt_tos(sk);
+ fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
return ip_route_output_key(sock_net(sk), fl4);
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index bfc9be23c973..6b26a0803696 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -334,6 +334,8 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
unimac_mdio_suspend, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v2.1-mdio", },
+ { .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 7aafc221b5cf..683e8f8319ab 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL(xgene_mdio_wr_mac);
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg)
{
- struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
+ struct xgene_mdio_pdata *pdata = bus->priv;
u32 data, done;
u8 wait = 10;
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(xgene_mdio_rgmii_read);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
{
- struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
+ struct xgene_mdio_pdata *pdata = bus->priv;
u32 val, done;
u8 wait = 10;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4f4f79532c6c..87f18aedd3bd 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
+#include <linux/utsname.h>
MODULE_AUTHOR("Maintainer: Matt Mackall <[email protected]>");
MODULE_DESCRIPTION("Console driver for network interfaces");
@@ -84,6 +85,8 @@ static struct console netconsole_ext;
* Also, other parameters of a target may be modified at
* runtime only when it is disabled (enabled == 0).
* @extended: Denotes whether console is extended or not.
+ * @release: Denotes whether kernel release version should be prepended
+ * to the message. Depends on extended console.
* @np: The netpoll structure for this target.
* Contains the other userspace visible parameters:
* dev_name (read-write)
@@ -101,6 +104,7 @@ struct netconsole_target {
#endif
bool enabled;
bool extended;
+ bool release;
struct netpoll np;
};
@@ -188,6 +192,15 @@ static struct netconsole_target *alloc_param_target(char *target_config)
target_config++;
}
+ if (*target_config == 'r') {
+ if (!nt->extended) {
+ pr_err("Netconsole configuration error. Release feature requires extended log message");
+ goto fail;
+ }
+ nt->release = true;
+ target_config++;
+ }
+
/* Parse parameters and setup netpoll */
err = netpoll_parse_options(&nt->np, target_config);
if (err)
@@ -222,6 +235,7 @@ static void free_param_target(struct netconsole_target *nt)
* |
* <target>/
* | enabled
+ * | release
* | dev_name
* | local_port
* | remote_port
@@ -246,27 +260,32 @@ static struct netconsole_target *to_target(struct config_item *item)
static ssize_t enabled_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->enabled);
+ return sysfs_emit(buf, "%d\n", to_target(item)->enabled);
}
static ssize_t extended_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->extended);
+ return sysfs_emit(buf, "%d\n", to_target(item)->extended);
+}
+
+static ssize_t release_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", to_target(item)->release);
}
static ssize_t dev_name_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", to_target(item)->np.dev_name);
+ return sysfs_emit(buf, "%s\n", to_target(item)->np.dev_name);
}
static ssize_t local_port_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->np.local_port);
+ return sysfs_emit(buf, "%d\n", to_target(item)->np.local_port);
}
static ssize_t remote_port_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->np.remote_port);
+ return sysfs_emit(buf, "%d\n", to_target(item)->np.remote_port);
}
static ssize_t local_ip_show(struct config_item *item, char *buf)
@@ -274,9 +293,9 @@ static ssize_t local_ip_show(struct config_item *item, char *buf)
struct netconsole_target *nt = to_target(item);
if (nt->np.ipv6)
- return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.local_ip.in6);
+ return sysfs_emit(buf, "%pI6c\n", &nt->np.local_ip.in6);
else
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip);
+ return sysfs_emit(buf, "%pI4\n", &nt->np.local_ip);
}
static ssize_t remote_ip_show(struct config_item *item, char *buf)
@@ -284,9 +303,9 @@ static ssize_t remote_ip_show(struct config_item *item, char *buf)
struct netconsole_target *nt = to_target(item);
if (nt->np.ipv6)
- return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.remote_ip.in6);
+ return sysfs_emit(buf, "%pI6c\n", &nt->np.remote_ip.in6);
else
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip);
+ return sysfs_emit(buf, "%pI4\n", &nt->np.remote_ip);
}
static ssize_t local_mac_show(struct config_item *item, char *buf)
@@ -294,12 +313,12 @@ static ssize_t local_mac_show(struct config_item *item, char *buf)
struct net_device *dev = to_target(item)->np.dev;
static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- return snprintf(buf, PAGE_SIZE, "%pM\n", dev ? dev->dev_addr : bcast);
+ return sysfs_emit(buf, "%pM\n", dev ? dev->dev_addr : bcast);
}
static ssize_t remote_mac_show(struct config_item *item, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%pM\n", to_target(item)->np.remote_mac);
+ return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}
/*
@@ -314,17 +333,15 @@ static ssize_t enabled_store(struct config_item *item,
{
struct netconsole_target *nt = to_target(item);
unsigned long flags;
- int enabled;
+ bool enabled;
int err;
mutex_lock(&dynamic_netconsole_mutex);
- err = kstrtoint(buf, 10, &enabled);
- if (err < 0)
+ err = kstrtobool(buf, &enabled);
+ if (err)
goto out_unlock;
err = -EINVAL;
- if (enabled < 0 || enabled > 1)
- goto out_unlock;
if ((bool)enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
@@ -332,6 +349,11 @@ static ssize_t enabled_store(struct config_item *item,
}
if (enabled) { /* true */
+ if (nt->release && !nt->extended) {
+ pr_err("Not enabling netconsole. Release feature requires extended log message");
+ goto out_unlock;
+ }
+
if (nt->extended && !console_is_registered(&netconsole_ext))
register_console(&netconsole_ext);
@@ -366,11 +388,11 @@ out_unlock:
return err;
}
-static ssize_t extended_store(struct config_item *item, const char *buf,
- size_t count)
+static ssize_t release_store(struct config_item *item, const char *buf,
+ size_t count)
{
struct netconsole_target *nt = to_target(item);
- int extended;
+ bool release;
int err;
mutex_lock(&dynamic_netconsole_mutex);
@@ -381,14 +403,38 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- err = kstrtoint(buf, 10, &extended);
- if (err < 0)
+ err = kstrtobool(buf, &release);
+ if (err)
goto out_unlock;
- if (extended < 0 || extended > 1) {
+
+ nt->release = release;
+
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return strnlen(buf, count);
+out_unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return err;
+}
+
+static ssize_t extended_store(struct config_item *item, const char *buf,
+ size_t count)
+{
+ struct netconsole_target *nt = to_target(item);
+ bool extended;
+ int err;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ if (nt->enabled) {
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
err = -EINVAL;
goto out_unlock;
}
+ err = kstrtobool(buf, &extended);
+ if (err)
+ goto out_unlock;
+
nt->extended = extended;
mutex_unlock(&dynamic_netconsole_mutex);
@@ -576,10 +622,12 @@ CONFIGFS_ATTR(, local_ip);
CONFIGFS_ATTR(, remote_ip);
CONFIGFS_ATTR_RO(, local_mac);
CONFIGFS_ATTR(, remote_mac);
+CONFIGFS_ATTR(, release);
static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_enabled,
&attr_extended,
+ &attr_release,
&attr_dev_name,
&attr_local_port,
&attr_remote_port,
@@ -772,9 +820,23 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
const char *header, *body;
int offset = 0;
int header_len, body_len;
+ const char *msg_ready = msg;
+ const char *release;
+ int release_len = 0;
- if (msg_len <= MAX_PRINT_CHUNK) {
- netpoll_send_udp(&nt->np, msg, msg_len);
+ if (nt->release) {
+ release = init_utsname()->release;
+ release_len = strlen(release) + 1;
+ }
+
+ if (msg_len + release_len <= MAX_PRINT_CHUNK) {
+ /* No fragmentation needed */
+ if (nt->release) {
+ scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
+ msg_len += release_len;
+ msg_ready = buf;
+ }
+ netpoll_send_udp(&nt->np, msg_ready, msg_len);
return;
}
@@ -792,7 +854,10 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
* Transfer multiple chunks with the following extra header.
* "ncfrag=<byte-offset>/<total-bytes>"
*/
- memcpy(buf, header, header_len);
+ if (nt->release)
+ scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release);
+ memcpy(buf + release_len, header, header_len);
+ header_len += release_len;
while (offset < body_len) {
int this_header = header_len;
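
With nt->release set, every datagram now begins with the kernel release and a comma before the usual extended-console payload, and the prefix is repeated on each ncfrag chunk so a receiver needs no extra state to attribute fragments. A tiny sketch of the resulting layout, with made-up header values (level,seq,usec,flags):

	#include <stdio.h>

	int main(void)
	{
		const char *release = "6.6.0";         /* init_utsname()->release */
		const char *header  = "6,112,3413,-;"; /* extended printk header */
		const char *body    = "example message";

		/* Unfragmented datagram: "<release>,<header><body>" */
		printf("%s,%s%s\n", release, header, body);
		return 0;
	}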
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 5735e5b1a2cb..f8de93bc5f5b 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -17,3 +17,7 @@ endif
ifneq ($(CONFIG_PSAMPLE),)
netdevsim-objs += psample.o
endif
+
+ifneq ($(CONFIG_MACSEC),)
+netdevsim-objs += macsec.o
+endif
diff --git a/drivers/net/netdevsim/macsec.c b/drivers/net/netdevsim/macsec.c
new file mode 100644
index 000000000000..0d5f50430dd3
--- /dev/null
+++ b/drivers/net/netdevsim/macsec.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <net/macsec.h>
+#include "netdevsim.h"
+
+static inline u64 sci_to_cpu(sci_t sci)
+{
+ return be64_to_cpu((__force __be64)sci);
+}
+
+static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci)
+{
+ int i;
+
+ for (i = 0; i < NSIM_MACSEC_MAX_SECY_COUNT; i++) {
+ if (ns->macsec.nsim_secy[i].sci == sci)
+ return i;
+ }
+
+ return -1;
+}
+
+static int nsim_macsec_find_rxsc(struct nsim_secy *ns_secy, sci_t sci)
+{
+ int i;
+
+ for (i = 0; i < NSIM_MACSEC_MAX_RXSC_COUNT; i++) {
+ if (ns_secy->nsim_rxsc[i].sci == sci)
+ return i;
+ }
+
+ return -1;
+}
+
+static int nsim_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ if (ns->macsec.nsim_secy_count == NSIM_MACSEC_MAX_SECY_COUNT)
+ return -ENOSPC;
+
+ for (idx = 0; idx < NSIM_MACSEC_MAX_SECY_COUNT; idx++) {
+ if (!ns->macsec.nsim_secy[idx].used)
+ break;
+ }
+
+ if (idx == NSIM_MACSEC_MAX_SECY_COUNT) {
+ netdev_err(ctx->netdev, "%s: nsim_secy_count not full but all SecYs used\n",
+ __func__);
+ return -ENOSPC;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: adding new secy with sci %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->secy->sci), idx);
+ ns->macsec.nsim_secy[idx].used = true;
+ ns->macsec.nsim_secy[idx].nsim_rxsc_count = 0;
+ ns->macsec.nsim_secy[idx].sci = ctx->secy->sci;
+ ns->macsec.nsim_secy_count++;
+
+ return 0;
+}
+
+static int nsim_macsec_upd_secy(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: updating secy with sci %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->secy->sci), idx);
+
+ return 0;
+}
+
+static int nsim_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: removing SecY with SCI %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->secy->sci), idx);
+
+ ns->macsec.nsim_secy[idx].used = false;
+ memset(&ns->macsec.nsim_secy[idx], 0, sizeof(ns->macsec.nsim_secy[idx]));
+ ns->macsec.nsim_secy_count--;
+
+ return 0;
+}
+
+static int nsim_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ if (secy->nsim_rxsc_count == NSIM_MACSEC_MAX_RXSC_COUNT)
+ return -ENOSPC;
+
+ for (idx = 0; idx < NSIM_MACSEC_MAX_RXSC_COUNT; idx++) {
+ if (!secy->nsim_rxsc[idx].used)
+ break;
+ }
+
+ if (idx == NSIM_MACSEC_MAX_RXSC_COUNT)
+ netdev_err(ctx->netdev, "%s: nsim_rxsc_count not full but all RXSCs used\n",
+ __func__);
+
+ netdev_dbg(ctx->netdev, "%s: adding new rxsc with sci %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->rx_sc->sci), idx);
+ secy->nsim_rxsc[idx].used = true;
+ secy->nsim_rxsc[idx].sci = ctx->rx_sc->sci;
+ secy->nsim_rxsc_count++;
+
+ return 0;
+}
+
+static int nsim_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
+ __func__, sci_to_cpu(ctx->rx_sc->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: updating RXSC with sci %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->rx_sc->sci), idx);
+
+ return 0;
+}
+
+static int nsim_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
+ __func__, sci_to_cpu(ctx->rx_sc->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: removing RXSC with sci %08llx at index %d\n",
+ __func__, sci_to_cpu(ctx->rx_sc->sci), idx);
+
+ secy->nsim_rxsc[idx].used = false;
+ memset(&secy->nsim_rxsc[idx], 0, sizeof(secy->nsim_rxsc[idx]));
+ secy->nsim_rxsc_count--;
+
+ return 0;
+}
+
+static int nsim_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static int nsim_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static int nsim_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ struct nsim_secy *secy;
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+ secy = &ns->macsec.nsim_secy[idx];
+
+ idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static int nsim_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static int nsim_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static int nsim_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct netdevsim *ns = netdev_priv(ctx->netdev);
+ int idx;
+
+ idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
+ if (idx < 0) {
+ netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
+ __func__, sci_to_cpu(ctx->secy->sci));
+ return -ENOENT;
+ }
+
+ netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
+ __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
+
+ return 0;
+}
+
+static const struct macsec_ops nsim_macsec_ops = {
+ .mdo_add_secy = nsim_macsec_add_secy,
+ .mdo_upd_secy = nsim_macsec_upd_secy,
+ .mdo_del_secy = nsim_macsec_del_secy,
+ .mdo_add_rxsc = nsim_macsec_add_rxsc,
+ .mdo_upd_rxsc = nsim_macsec_upd_rxsc,
+ .mdo_del_rxsc = nsim_macsec_del_rxsc,
+ .mdo_add_rxsa = nsim_macsec_add_rxsa,
+ .mdo_upd_rxsa = nsim_macsec_upd_rxsa,
+ .mdo_del_rxsa = nsim_macsec_del_rxsa,
+ .mdo_add_txsa = nsim_macsec_add_txsa,
+ .mdo_upd_txsa = nsim_macsec_upd_txsa,
+ .mdo_del_txsa = nsim_macsec_del_txsa,
+};
+
+void nsim_macsec_init(struct netdevsim *ns)
+{
+ ns->netdev->macsec_ops = &nsim_macsec_ops;
+ ns->netdev->features |= NETIF_F_HW_MACSEC;
+ memset(&ns->macsec, 0, sizeof(ns->macsec));
+}
+
+void nsim_macsec_teardown(struct netdevsim *ns)
+{
+}
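
None of the ops above touch real traffic; they only mirror SecY/RXSC state into the nsim tables so the MACsec offload paths can be driven from userspace. One plausible smoke test, assuming a netdevsim port named eth0 and the stock ip-macsec(8) syntax:

	ip link add link eth0 macsec0 type macsec encrypt on
	ip macsec offload macsec0 mac
	ip macsec add macsec0 tx sa 0 pn 1 on key 01 22222222222222222222222222222222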
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 35fa1ca98671..0c8daeb0d62b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -304,6 +304,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
if (err)
goto err_utn_destroy;
+ nsim_macsec_init(ns);
nsim_ipsec_init(ns);
err = register_netdevice(ns->netdev);
@@ -314,6 +315,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
+ nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_utn_destroy:
rtnl_unlock();
@@ -374,6 +376,7 @@ void nsim_destroy(struct netdevsim *ns)
rtnl_lock();
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
+ nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 7d8ed8d8df5c..7be98b7dcca9 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -23,6 +23,7 @@
#include <net/devlink.h>
#include <net/udp_tunnel.h>
#include <net/xdp.h>
+#include <net/macsec.h>
#define DRV_NAME "netdevsim"
@@ -52,6 +53,25 @@ struct nsim_ipsec {
u32 ok;
};
+#define NSIM_MACSEC_MAX_SECY_COUNT 3
+#define NSIM_MACSEC_MAX_RXSC_COUNT 1
+struct nsim_rxsc {
+ sci_t sci;
+ bool used;
+};
+
+struct nsim_secy {
+ sci_t sci;
+ struct nsim_rxsc nsim_rxsc[NSIM_MACSEC_MAX_RXSC_COUNT];
+ u8 nsim_rxsc_count;
+ bool used;
+};
+
+struct nsim_macsec {
+ struct nsim_secy nsim_secy[NSIM_MACSEC_MAX_SECY_COUNT];
+ u8 nsim_secy_count;
+};
+
struct nsim_ethtool_pauseparam {
bool rx;
bool tx;
@@ -93,6 +113,7 @@ struct netdevsim {
bool bpf_map_accept;
struct nsim_ipsec ipsec;
+ struct nsim_macsec macsec;
struct {
u32 inject_error;
u32 sleep;
@@ -366,6 +387,19 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
}
#endif
+#if IS_ENABLED(CONFIG_MACSEC)
+void nsim_macsec_init(struct netdevsim *ns);
+void nsim_macsec_teardown(struct netdevsim *ns);
+#else
+static inline void nsim_macsec_init(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_macsec_teardown(struct netdevsim *ns)
+{
+}
+#endif
+
struct nsim_bus_dev {
struct device dev;
struct list_head list;
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 323bec5e57f8..e5d642c67a2c 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/pcs-rzn1-miic.h>
#include <linux/phylink.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 78e6981650d9..67aaeb75301f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -217,6 +217,12 @@ config MARVELL_10G_PHY
help
Support for the Marvell Alaska MV88X3310 and compatible PHYs.
+config MARVELL_88Q2XXX_PHY
+ tristate "Marvell 88Q2XXX PHY"
+ help
+ Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet
+ PHYs.
+
config MARVELL_88X2222_PHY
tristate "Marvell 88X2222 PHY"
help
@@ -344,6 +350,7 @@ config ROCKCHIP_PHY
config SMSC_PHY
tristate "SMSC PHYs"
+ select CRC16
help
Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2fe51ea83bab..35142780fc9d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
+obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index c1f307d90518..13c4121fa309 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -272,6 +272,13 @@
#define QCA808X_CDT_STATUS_STAT_OPEN 2
#define QCA808X_CDT_STATUS_STAT_SHORT 3
+/* QCA808X 1G chip type */
+#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
+#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
+
+#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
+#define QCA8081_PHY_FIFO_RSTN BIT(11)
+
MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
@@ -897,15 +904,6 @@ static int at803x_get_features(struct phy_device *phydev)
if (err)
return err;
- if (phydev->drv->phy_id == QCA8081_PHY_ID) {
- err = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_NG_EXTABLE);
- if (err < 0)
- return err;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported,
- err & MDIO_PMA_NG_EXTABLE_2_5GBT);
- }
-
if (phydev->drv->phy_id != ATH8031_PHY_ID)
return 0;
@@ -1734,24 +1732,30 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
return 0;
}
-static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
{
- u16 seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+ u16 seed_value;
+
+ if (!enable)
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+ seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_CFG,
- FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value));
+ QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+ QCA808X_MASTER_SLAVE_SEED_ENABLE);
}
-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+static bool qca808x_is_prefer_master(struct phy_device *phydev)
{
- u16 seed_enable = 0;
-
- if (enable)
- seed_enable = QCA808X_MASTER_SLAVE_SEED_ENABLE;
+ return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+ (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+}
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_ENABLE, seed_enable);
+static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+{
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
}
static int qca808x_config_init(struct phy_device *phydev)
@@ -1770,20 +1774,25 @@ static int qca808x_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* Config the fast retrain for the link 2500M */
- ret = qca808x_phy_fast_retrain_config(phydev);
- if (ret)
- return ret;
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ /* Config the fast retrain for the link 2500M */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
- /* Configure lower ramdom seed to make phy linked as slave mode */
- ret = qca808x_phy_ms_random_seed_set(phydev);
- if (ret)
- return ret;
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
- /* Enable seed */
- ret = qca808x_phy_ms_seed_enable(phydev, true);
- if (ret)
- return ret;
+ if (!qca808x_is_prefer_master(phydev)) {
+ /* Enable seed and configure a lower random seed to make the
+ * phy link in slave mode.
+ */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+ }
+ }
/* Configure adc threshold as 100mv for the link 10M */
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
@@ -1816,17 +1825,21 @@ static int qca808x_read_status(struct phy_device *phydev)
phydev->interface = PHY_INTERFACE_MODE_SGMII;
} else {
/* generate seed as a lower random value to make PHY linked as SLAVE easily,
- * except for master/slave configuration fault detected.
+ * except when a master/slave configuration fault is detected or
+ * master mode is preferred.
+ *
* the reason for not putting this code into the function link_change_notify is
* the corner case where the link partner is also the qca8081 PHY and the seed
* value is configured as the same value, the link can't be up and no link change
* occurs.
*/
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
- qca808x_phy_ms_seed_enable(phydev, false);
- } else {
- qca808x_phy_ms_random_seed_set(phydev);
- qca808x_phy_ms_seed_enable(phydev, true);
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+ qca808x_is_prefer_master(phydev)) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
}
}
@@ -1841,7 +1854,10 @@ static int qca808x_soft_reset(struct phy_device *phydev)
if (ret < 0)
return ret;
- return qca808x_phy_ms_seed_enable(phydev, true);
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+
+ return ret;
}
static bool qca808x_cdt_fault_length_valid(int cdt_code)
@@ -1991,6 +2007,44 @@ static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finish
return 0;
}
+static int qca808x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The autoneg ability is not advertised in bit 3 of MMD7.1,
+ * but it is supported by qca808x PHY, so we add it here
+ * manually.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ /* On the qca8081 1G version chip, the 2500baseT ability is still
+ * set in bit0 of MMD1.21, so we need to clear it manually when
+ * bit0 of MMD7.0x901d identifies the chip as the 1G variant.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+ if (ret < 0)
+ return ret;
+
+ if (QCA808X_PHY_CHIP_TYPE_1G & ret)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+
+ return 0;
+}
+
+static void qca808x_link_change_notify(struct phy_device *phydev)
+{
+ /* Assert the interface sgmii fifo on link down, deassert it on link
+ * up. The interface device address is always the phy address plus 1.
+ */
+ mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN, phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
@@ -2160,7 +2214,7 @@ static struct phy_driver at803x_driver[] = {
.set_tunable = at803x_set_tunable,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
- .get_features = at803x_get_features,
+ .get_features = qca808x_get_features,
.config_aneg = at803x_config_aneg,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -2169,6 +2223,7 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = qca808x_soft_reset,
.cable_test_start = qca808x_cable_test_start,
.cable_test_get_status = qca808x_cable_test_get_status,
+ .link_change_notify = qca808x_link_change_notify,
}, };
module_phy_driver(at803x_driver);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index f8c17a253f8b..8478b081c058 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -913,6 +913,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
+ BCM7XXX_16NM_EPHY(PHY_ID_BCM74165, "Broadcom BCM74165"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM74371, "Broadcom BCM74371"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
new file mode 100644
index 000000000000..1c3ff77de56b
--- /dev/null
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver
+ */
+#include <linux/ethtool_netlink.h>
+#include <linux/marvell_phy.h>
+#include <linux/phy.h>
+
+#define MDIO_MMD_AN_MV_STAT 32769
+#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
+#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
+#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
+#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
+#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+
+#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000
+
+#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+
+static int mv88q2xxx_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+ int val;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_CTRL, MDIO_PCS_1000BT1_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_CTRL, val,
+ !(val & MDIO_PCS_1000BT1_CTRL_RESET),
+ 50000, 600000, true);
+}
+
+static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
+{
+ int ret;
+ bool link = false;
+
+ /* Read vendor specific Auto-Negotiation status register to get local
+ * and remote receiver status according to software initialization
+ * guide.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
+ if (ret < 0) {
+ return ret;
+ } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
+ (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) {
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops, unless
+ * the link was already down.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ if (ret < 0)
+ return ret;
+ else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
+ link = true;
+ }
+
+ if (!link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ if (ret < 0)
+ return ret;
+ else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
+ link = true;
+ }
+ }
+
+ phydev->link = link;
+
+ return 0;
+}
+
+static int mv88q2xxx_read_link_100m(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops, unless
+ * the link was already down. In case we are not polling,
+ * we always read the realtime status.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (ret < 0)
+ return ret;
+ else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK)
+ goto out;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (ret < 0)
+ return ret;
+
+out:
+ /* Check if we have link and if the remote and local receiver are ok */
+ if ((ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK) &&
+ (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX) &&
+ (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX))
+ phydev->link = true;
+ else
+ phydev->link = false;
+
+ return 0;
+}
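
Both link readers above cope with latched-low status bits: after a drop, the register keeps reporting link-down until it has been read once. A generic sketch of the idiom, assuming a hypothetical read_status() accessor for the latched register:

	/* Hypothetical accessor for a latched-low link status bit. */
	extern bool read_status(void);

	static bool read_link_latched_low(bool polling, bool was_up)
	{
		/* When polling an up link, read only once: a second read
		 * would clear the latch and hide a momentary drop.
		 */
		if (!polling || !was_up) {
			if (read_status())	/* latched value */
				return true;
		}

		return read_status();		/* realtime value */
	}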
+
+static int mv88q2xxx_read_link(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The 88Q2XXX PHYs do not have the PMA/PMD status register available,
+ * so we need to read the link status from the vendor specific
+ * registers depending on the speed.
+ */
+ if (phydev->speed == SPEED_1000)
+ ret = mv88q2xxx_read_link_gbit(phydev);
+ else
+ ret = mv88q2xxx_read_link_100m(phydev);
+
+ return ret;
+}
+
+static int mv88q2xxx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q2xxx_read_link(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_read_pma(phydev);
+}
+
+static int mv88q2xxx_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* We need to read the baset1 extended abilities manually because the
+ * PHY does not signal that it has the extended abilities register
+ * available.
+ */
+ ret = genphy_c45_pma_baset1_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The PHY signals that it supports autonegotiation. Unfortunately, so
+ * far it was not possible to get a link even when following the init
+ * sequence provided by Marvell. Disable it for now until a proper
+ * workaround is found or a new PHY revision is released.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int mv88q2xxx_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ return mv88q2xxx_soft_reset(phydev);
+}
+
+static int mv88q2xxx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The 88Q2XXX PHYs do have the extended ability register available, but
+ * the MDIO_PMA_EXTABLE register, where they should signal it, does
+ * not work according to the specification. Therefore, we force it here.
+ */
+ phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
+
+ /* Read the current PHY configuration */
+ ret = genphy_c45_read_pma(phydev);
+ if (ret)
+ return ret;
+
+ return mv88q2xxx_config_aneg(phydev);
+}
+
+static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->speed == SPEED_100) {
+ /* Read the SQI from the vendor specific receiver status
+ * register
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230);
+ if (ret < 0)
+ return ret;
+
+ ret = ret >> 12;
+ } else {
+ /* Read from vendor specific registers; they are not documented
+ * but can be found in the Software Initialization Guide. Only
+ * revisions >= A0 are supported.
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0xfc88);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret & 0x0F;
+}
+
+static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev)
+{
+ return 15;
+}
+
+static struct phy_driver mv88q2xxx_driver[] = {
+ {
+ .phy_id = MARVELL_PHY_ID_88Q2110,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "mv88q2110",
+ .get_features = mv88q2xxx_get_features,
+ .config_aneg = mv88q2xxx_config_aneg,
+ .config_init = mv88q2xxx_config_init,
+ .read_status = mv88q2xxx_read_status,
+ .soft_reset = mv88q2xxx_soft_reset,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = mv88q2xxxx_get_sqi,
+ .get_sqi_max = mv88q2xxxx_get_sqi_max,
+ },
+};
+
+module_phy_driver(mv88q2xxx_driver);
+
+static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
+ { MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
+ { /*sentinel*/ }
+};
+MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
+
+MODULE_DESCRIPTION("Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index f83cae64585d..e3aa30dad2e6 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -14,7 +14,6 @@
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/sfp.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8b3618d3da4a..25dcaa49ab8b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -107,16 +107,21 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
}
EXPORT_SYMBOL(mdiobus_unregister_device);
-struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+static struct mdio_device *mdiobus_find_device(struct mii_bus *bus, int addr)
{
bool addr_valid = addr >= 0 && addr < ARRAY_SIZE(bus->mdio_map);
- struct mdio_device *mdiodev;
if (WARN_ONCE(!addr_valid, "addr %d out of range\n", addr))
return NULL;
- mdiodev = bus->mdio_map[addr];
+ return bus->mdio_map[addr];
+}
+
+struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+{
+ struct mdio_device *mdiodev;
+ mdiodev = mdiobus_find_device(bus, addr);
if (!mdiodev)
return NULL;
@@ -129,7 +134,7 @@ EXPORT_SYMBOL(mdiobus_get_phy);
bool mdiobus_is_registered_device(struct mii_bus *bus, int addr)
{
- return bus->mdio_map[addr];
+ return mdiobus_find_device(bus, addr) != NULL;
}
EXPORT_SYMBOL(mdiobus_is_registered_device);
@@ -1210,6 +1215,26 @@ int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
}
EXPORT_SYMBOL(mdiobus_c45_write_nested);
+/**
+ * __mdiobus_modify - Convenience function for modifying a given mdio device
+ * register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set)
+{
+ int err;
+
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+
+ return err < 0 ? err : 0;
+}
+EXPORT_SYMBOL_GPL(__mdiobus_modify);
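
Unlike __mdiobus_modify_changed, which returns 1 when the register value actually changed, the new __mdiobus_modify folds that into plain success, and the double underscore marks it for callers that already hold bus->mdio_lock. A hedged usage sketch with a hypothetical helper:

	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Assumes the caller already holds bus->mdio_lock. */
	static int my_locked_loopback(struct mii_bus *bus, int addr, bool on)
	{
		return __mdiobus_modify(bus, addr, MII_BMCR, BMCR_LOOPBACK,
					on ? BMCR_LOOPBACK : 0);
	}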
+
/**
* mdiobus_modify - Convenience function for modifying a given mdio device
* register
@@ -1224,10 +1249,10 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
int err;
mutex_lock(&bus->mdio_lock);
- err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ err = __mdiobus_modify(bus, addr, regnum, mask, set);
mutex_unlock(&bus->mdio_lock);
- return err < 0 ? err : 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mdiobus_modify);
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 95369171a7ba..da512fab0eb0 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -2,8 +2,6 @@
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/phy.h>
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 2fa5a90e073b..7a11fdb687cc 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -163,6 +163,10 @@
#define YT8521_CHIP_CONFIG_REG 0xA001
#define YT8521_CCR_SW_RST BIT(15)
+#define YT8531_RGMII_LDO_VOL_MASK GENMASK(5, 4)
+#define YT8531_LDO_VOL_3V3 0x0
+#define YT8531_LDO_VOL_1V8 0x2
+
/* 1b0 disable 1.9ns rxc clock delay *default*
* 1b1 enable 1.9ns rxc clock delay
*/
@@ -236,6 +240,12 @@
*/
#define YTPHY_WCR_TYPE_PULSE BIT(0)
+#define YTPHY_PAD_DRIVE_STRENGTH_REG 0xA010
+#define YT8531_RGMII_RXC_DS_MASK GENMASK(15, 13)
+#define YT8531_RGMII_RXD_DS_HI_MASK BIT(12) /* Bit 2 of rxd_ds */
+#define YT8531_RGMII_RXD_DS_LOW_MASK GENMASK(5, 4) /* Bit 1/0 of rxd_ds */
+#define YT8531_RGMII_RX_DS_DEFAULT 0x3
+
#define YTPHY_SYNCE_CFG_REG 0xA012
#define YT8521_SCR_SYNCE_ENABLE BIT(5)
/* 1b0 output 25m clock
@@ -835,6 +845,110 @@ static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev)
}
/**
+ * struct ytphy_ldo_vol_map - map a current value to a register value
+ * @vol: ldo voltage
+ * @ds: value in the register
+ * @cur: value in device configuration
+ */
+struct ytphy_ldo_vol_map {
+ u32 vol;
+ u32 ds;
+ u32 cur;
+};
+
+static const struct ytphy_ldo_vol_map yt8531_ldo_vol[] = {
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 0, .cur = 1200},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 1, .cur = 2100},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 2, .cur = 2700},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 3, .cur = 2910},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 4, .cur = 3110},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 5, .cur = 3600},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 6, .cur = 3970},
+ {.vol = YT8531_LDO_VOL_1V8, .ds = 7, .cur = 4350},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 0, .cur = 3070},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 1, .cur = 4080},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 2, .cur = 4370},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 3, .cur = 4680},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 4, .cur = 5020},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 5, .cur = 5450},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 6, .cur = 5740},
+ {.vol = YT8531_LDO_VOL_3V3, .ds = 7, .cur = 6140},
+};
+
+static u32 yt8531_get_ldo_vol(struct phy_device *phydev)
+{
+ u32 val;
+
+ val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ val = FIELD_GET(YT8531_RGMII_LDO_VOL_MASK, val);
+
+ return val <= YT8531_LDO_VOL_1V8 ? val : YT8531_LDO_VOL_1V8;
+}
+
+static int yt8531_get_ds_map(struct phy_device *phydev, u32 cur)
+{
+ u32 vol;
+ int i;
+
+ vol = yt8531_get_ldo_vol(phydev);
+ for (i = 0; i < ARRAY_SIZE(yt8531_ldo_vol); i++) {
+ if (yt8531_ldo_vol[i].vol == vol && yt8531_ldo_vol[i].cur == cur)
+ return yt8531_ldo_vol[i].ds;
+ }
+
+ return -EINVAL;
+}
+
+static int yt8531_set_ds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u32 ds_field_low, ds_field_hi, val;
+ int ret, ds;
+
+ /* set rgmii rx clk drive strength */
+ if (!of_property_read_u32(node, "motorcomm,rx-clk-drv-microamp", &val)) {
+ ds = yt8531_get_ds_map(phydev, val);
+ if (ds < 0)
+ return dev_err_probe(&phydev->mdio.dev, ds,
+ "No matching current value was found.\n");
+ } else {
+ ds = YT8531_RGMII_RX_DS_DEFAULT;
+ }
+
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YTPHY_PAD_DRIVE_STRENGTH_REG,
+ YT8531_RGMII_RXC_DS_MASK,
+ FIELD_PREP(YT8531_RGMII_RXC_DS_MASK, ds));
+ if (ret < 0)
+ return ret;
+
+ /* set rgmii rx data drive strength */
+ if (!of_property_read_u32(node, "motorcomm,rx-data-drv-microamp", &val)) {
+ ds = yt8531_get_ds_map(phydev, val);
+ if (ds < 0)
+ return dev_err_probe(&phydev->mdio.dev, ds,
+ "No matching current value was found.\n");
+ } else {
+ ds = YT8531_RGMII_RX_DS_DEFAULT;
+ }
+
+ ds_field_hi = FIELD_GET(BIT(2), ds);
+ ds_field_hi = FIELD_PREP(YT8531_RGMII_RXD_DS_HI_MASK, ds_field_hi);
+
+ ds_field_low = FIELD_GET(GENMASK(1, 0), ds);
+ ds_field_low = FIELD_PREP(YT8531_RGMII_RXD_DS_LOW_MASK, ds_field_low);
+
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YTPHY_PAD_DRIVE_STRENGTH_REG,
+ YT8531_RGMII_RXD_DS_LOW_MASK | YT8531_RGMII_RXD_DS_HI_MASK,
+ ds_field_low | ds_field_hi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
* yt8521_probe() - read chip config then set suitable polling_mode
* @phydev: a pointer to a &struct phy_device
*
@@ -1518,6 +1632,10 @@ static int yt8531_config_init(struct phy_device *phydev)
return ret;
}
+ ret = yt8531_set_ds(phydev);
+ if (ret < 0)
+ return ret;
+
return 0;
}
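
The RXD drive-strength code is three bits wide, but the register stores it split: bit 2 lands in register bit 12 (YT8531_RGMII_RXD_DS_HI_MASK) and bits 1:0 in register bits 5:4 (YT8531_RGMII_RXD_DS_LOW_MASK). A standalone user-space sketch of that split, with values copied from the defines above (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ds = 0x3;			/* YT8531_RGMII_RX_DS_DEFAULT */
	uint16_t hi = ((ds >> 2) & 0x1) << 12;	/* YT8531_RGMII_RXD_DS_HI_MASK */
	uint16_t low = (ds & 0x3) << 4;		/* YT8531_RGMII_RXD_DS_LOW_MASK */

	printf("pad drive strength bits: 0x%04x\n", (unsigned int)(hi | low));
	return 0;
}
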
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 93ed07223377..8e6fd4962c48 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_setup_master_slave);
*/
int genphy_c45_pma_setup_forced(struct phy_device *phydev)
{
- int ctrl1, ctrl2, ret;
+ int bt1_ctrl, ctrl1, ctrl2, ret;
/* Half duplex is not supported */
if (phydev->duplex != DUPLEX_FULL)
@@ -176,6 +176,15 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
if (ret < 0)
return ret;
+
+ bt1_ctrl = 0;
+ if (phydev->speed == SPEED_1000)
+ bt1_ctrl = MDIO_PMA_PMD_BT1_CTRL_STRAP_B1000;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_STRAP, bt1_ctrl);
+ if (ret < 0)
+ return ret;
}
return genphy_c45_an_disable_aneg(phydev);
@@ -873,6 +882,44 @@ int genphy_c45_an_config_eee_aneg(struct phy_device *phydev)
}
/**
+ * genphy_c45_pma_baset1_read_abilities - read supported baset1 link modes from PMA
+ * @phydev: target phy_device struct
+ *
+ * Read the supported link modes from the extended BASE-T1 ability register
+ */
+int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_PMD_BT1_B10L_ABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_PMD_BT1_B100_ABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_PMD_BT1_B1000_ABLE);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported,
+ val & MDIO_AN_STAT1_ABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_abilities);
+
+/**
* genphy_c45_pma_read_abilities - read supported link modes from PMA
* @phydev: target phy_device struct
*
@@ -968,21 +1015,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
}
if (val & MDIO_PMA_EXTABLE_BT1) {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1);
+ val = genphy_c45_pma_baset1_read_abilities(phydev);
if (val < 0)
return val;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_PMD_BT1_B10L_ABLE);
-
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
- if (val < 0)
- return val;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->supported,
- val & MDIO_AN_STAT1_ABLE);
}
}
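
As a usage note, a hypothetical clause-45 BASE-T1 PHY driver (the name and placeholder IDs below are illustrative, not from this patch) can now rely on the generic helper to populate phydev->supported, including 100BASE-T1 and 1000BASE-T1 when the PMA advertises them:

#include <linux/phy.h>

static struct phy_driver foo_t1_driver = {
	.phy_id		= 0x00000000,	/* placeholder ID */
	.phy_id_mask	= 0xffffffff,
	.name		= "foo BASE-T1",
	/* Fills in supported link modes, now including BASE-T1 ones. */
	.get_features	= genphy_c45_pma_read_abilities,
	.config_aneg	= genphy_c45_config_aneg,
	.read_status	= genphy_c45_read_status,
};
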
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index d0aaa5cad853..4f1c8bb199e9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -34,6 +34,10 @@ enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
PHYLINK_DISABLE_MAC_WOL,
+
+ PCS_STATE_DOWN = 0,
+ PCS_STATE_STARTING,
+ PCS_STATE_STARTED,
};
/**
@@ -72,6 +76,7 @@ struct phylink {
struct phylink_link_state phy_state;
struct work_struct resolve;
unsigned int pcs_neg_mode;
+ unsigned int pcs_state;
bool mac_link_dropped;
bool using_mac_select_pcs;
@@ -993,6 +998,40 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state)
}
}
+static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (pcs && pcs->ops->pcs_pre_config)
+ pcs->ops->pcs_pre_config(pcs, interface);
+}
+
+static int phylink_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ int err = 0;
+
+ if (pcs && pcs->ops->pcs_post_config)
+ err = pcs->ops->pcs_post_config(pcs, interface);
+
+ return err;
+}
+
+static void phylink_pcs_disable(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_disable)
+ pcs->ops->pcs_disable(pcs);
+}
+
+static int phylink_pcs_enable(struct phylink_pcs *pcs)
+{
+ int err = 0;
+
+ if (pcs && pcs->ops->pcs_enable)
+ err = pcs->ops->pcs_enable(pcs);
+
+ return err;
+}
+
static int phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
const struct phylink_link_state *state,
bool permit_pause_to_mac)
@@ -1027,30 +1066,33 @@ static void phylink_pcs_poll_start(struct phylink *pl)
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
+ struct phylink_link_state st = *state;
+
+ /* Stop drivers incorrectly using these */
+ linkmode_zero(st.lp_advertising);
+ st.speed = SPEED_UNKNOWN;
+ st.duplex = DUPLEX_UNKNOWN;
+ st.an_complete = false;
+ st.link = false;
+
phylink_dbg(pl,
- "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u\n",
+ "%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
__func__, phylink_an_mode_str(pl->cur_link_an_mode),
- phy_modes(state->interface),
- phy_speed_to_str(state->speed),
- phy_duplex_to_str(state->duplex),
- phy_rate_matching_to_str(state->rate_matching),
- __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
- state->pause, state->link);
+ phy_modes(st.interface),
+ phy_rate_matching_to_str(st.rate_matching),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
+ st.pause);
- pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state);
+ pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
}
-static void phylink_mac_pcs_an_restart(struct phylink *pl)
+static void phylink_pcs_an_restart(struct phylink *pl)
{
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- pl->link_config.advertising) &&
+ if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pl->link_config.advertising) &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
- phylink_autoneg_inband(pl->cur_link_an_mode)) {
- if (pl->pcs)
- pl->pcs->ops->pcs_an_restart(pl->pcs);
- else if (pl->config->legacy_pre_march2020)
- pl->mac_ops->mac_an_restart(pl->config);
- }
+ phylink_autoneg_inband(pl->cur_link_an_mode))
+ pl->pcs->ops->pcs_an_restart(pl->pcs);
}
static void phylink_major_config(struct phylink *pl, bool restart,
@@ -1095,11 +1137,28 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs_changed)
+ if (pcs_changed) {
+ phylink_pcs_disable(pl->pcs);
+
+ if (pl->pcs)
+ pl->pcs->phylink = NULL;
+
+ pcs->phylink = pl;
+
pl->pcs = pcs;
+ }
+
+ if (pl->pcs)
+ phylink_pcs_pre_config(pl->pcs, state->interface);
phylink_mac_config(pl, state);
+ if (pl->pcs)
+ phylink_pcs_post_config(pl->pcs, state->interface);
+
+ if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
+ phylink_pcs_enable(pl->pcs);
+
neg_mode = pl->cur_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
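
The resulting call order on a PCS swap is: disable the old PCS, pcs_pre_config(), mac_config(), pcs_post_config(), then pcs_enable() on the new PCS. A minimal sketch of the new optional enable/disable hooks, assuming a hypothetical foo_pcs driver (foo_pcs, FOO_PCS_* and its regmap are assumptions for illustration, not from this patch):

#include <linux/bits.h>
#include <linux/phylink.h>
#include <linux/regmap.h>

#define FOO_PCS_CTRL		0x00	/* illustrative register */
#define FOO_PCS_POWER_UP	BIT(0)	/* illustrative bit */

struct foo_pcs {
	struct phylink_pcs pcs;
	struct regmap *regmap;
};

static int foo_pcs_enable(struct phylink_pcs *pcs)
{
	struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);

	/* Power the PCS up only while phylink wants it active. */
	return regmap_set_bits(fp->regmap, FOO_PCS_CTRL, FOO_PCS_POWER_UP);
}

static void foo_pcs_disable(struct phylink_pcs *pcs)
{
	struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);

	regmap_clear_bits(fp->regmap, FOO_PCS_CTRL, FOO_PCS_POWER_UP);
}

These slot into struct phylink_pcs_ops as the optional .pcs_enable and .pcs_disable members; the phylink_pcs_enable()/phylink_pcs_disable() wrappers above skip them when a driver does not provide them.
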
@@ -1113,7 +1172,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
restart = true;
if (restart)
- phylink_mac_pcs_an_restart(pl);
+ phylink_pcs_an_restart(pl);
if (pl->mac_ops->mac_finish) {
err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
@@ -1146,13 +1205,6 @@ static int phylink_change_inband_advert(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
- if (!pl->pcs && pl->config->legacy_pre_march2020) {
- /* Legacy method */
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_pcs_an_restart(pl);
- return 0;
- }
-
phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(pl->link_config.interface),
@@ -1178,7 +1230,7 @@ static int phylink_change_inband_advert(struct phylink *pl)
return ret;
if (ret > 0)
- phylink_mac_pcs_an_restart(pl);
+ phylink_pcs_an_restart(pl);
return 0;
}
@@ -1205,9 +1257,6 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
if (pl->pcs)
pl->pcs->ops->pcs_get_state(pl->pcs, state);
- else if (pl->mac_ops->mac_pcs_get_state &&
- pl->config->legacy_pre_march2020)
- pl->mac_ops->mac_pcs_get_state(pl->config, state);
else
state->link = 0;
}
@@ -1440,13 +1489,6 @@ static void phylink_resolve(struct work_struct *w)
}
phylink_major_config(pl, false, &link_state);
pl->link_config.interface = link_state.interface;
- } else if (!pl->pcs && pl->config->legacy_pre_march2020) {
- /* The interface remains unchanged, only the speed,
- * duplex or pause settings have changed. Call the
- * old mac_config() method to configure the MAC/PCS
- * only if we do not have a legacy MAC driver.
- */
- phylink_mac_config(pl, &link_state);
}
}
@@ -1586,6 +1628,7 @@ struct phylink *phylink_create(struct phylink_config *config,
pl->link_config.pause = MLO_PAUSE_AN;
pl->link_config.speed = SPEED_UNKNOWN;
pl->link_config.duplex = DUPLEX_UNKNOWN;
+ pl->pcs_state = PCS_STATE_DOWN;
pl->mac_ops = mac_ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
timer_setup(&pl->link_poll, phylink_fixed_poll, 0);
@@ -1939,6 +1982,14 @@ void phylink_disconnect_phy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
+static void phylink_link_changed(struct phylink *pl, bool up, const char *what)
+{
+ if (!up)
+ pl->mac_link_dropped = true;
+ phylink_run_resolve(pl);
+ phylink_dbg(pl, "%s link %s\n", what, up ? "up" : "down");
+}
+
/**
* phylink_mac_change() - notify phylink of a change in MAC state
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -1949,13 +2000,30 @@ EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
*/
void phylink_mac_change(struct phylink *pl, bool up)
{
- if (!up)
- pl->mac_link_dropped = true;
- phylink_run_resolve(pl);
- phylink_dbg(pl, "mac link %s\n", up ? "up" : "down");
+ phylink_link_changed(pl, up, "mac");
}
EXPORT_SYMBOL_GPL(phylink_mac_change);
+/**
+ * phylink_pcs_change() - notify phylink of a change to PCS link state
+ * @pcs: pointer to &struct phylink_pcs
+ * @up: indicates whether the link is currently up.
+ *
+ * The PCS driver should call this when the state of its link changes
+ * (e.g. link failure, new negotiation results, etc.) Note: it should
+ * not determine "up" by reading the BMSR. If in doubt about the link
+ * state at interrupt time, then pass true if pcs_get_state() returns
+ * the latched link-down state, otherwise pass false.
+ */
+void phylink_pcs_change(struct phylink_pcs *pcs, bool up)
+{
+ struct phylink *pl = pcs->phylink;
+
+ if (pl)
+ phylink_link_changed(pl, up, "pcs");
+}
+EXPORT_SYMBOL_GPL(phylink_pcs_change);
+
static irqreturn_t phylink_link_handler(int irq, void *data)
{
struct phylink *pl = data;
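
A sketch of the intended caller, reusing the hypothetical foo_pcs from the earlier sketch: the PCS driver reports a link event from its interrupt handler.

#include <linux/interrupt.h>

static irqreturn_t foo_pcs_link_irq(int irq, void *dev_id)
{
	struct foo_pcs *fp = dev_id;

	/* Assuming foo's pcs_get_state() reads a latched link-down bit,
	 * the comment above says to pass true here rather than trying
	 * to read the current link state at interrupt time. */
	phylink_pcs_change(&fp->pcs, true);

	return IRQ_HANDLED;
}
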
@@ -1987,6 +2055,8 @@ void phylink_start(struct phylink *pl)
if (pl->netdev)
netif_carrier_off(pl->netdev);
+ pl->pcs_state = PCS_STATE_STARTING;
+
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
* ensures that we set the appropriate advertisement for Serdes links.
@@ -1997,6 +2067,8 @@ void phylink_start(struct phylink *pl)
*/
phylink_mac_initial_config(pl, true);
+ pl->pcs_state = PCS_STATE_STARTED;
+
phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED);
if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
@@ -2015,15 +2087,9 @@ void phylink_start(struct phylink *pl)
poll = true;
}
- switch (pl->cfg_link_an_mode) {
- case MLO_AN_FIXED:
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED)
poll |= pl->config->poll_fixed_state;
- break;
- case MLO_AN_INBAND:
- if (pl->pcs)
- poll |= pl->pcs->poll;
- break;
- }
+
if (poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
@@ -2060,6 +2126,10 @@ void phylink_stop(struct phylink *pl)
}
phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
+
+ pl->pcs_state = PCS_STATE_DOWN;
+
+ phylink_pcs_disable(pl->pcs);
}
EXPORT_SYMBOL_GPL(phylink_stop);
@@ -2449,7 +2519,7 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
if (pl->phydev)
ret = phy_restart_aneg(pl->phydev);
- phylink_mac_pcs_an_restart(pl);
+ phylink_pcs_an_restart(pl);
return ret;
}
@@ -3433,7 +3503,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
*
* Parse the Clause 37 or Cisco SGMII link partner negotiation word into
* the phylink @state structure. This is suitable to be used for implementing
- * the mac_pcs_get_state() member of the struct phylink_mac_ops structure if
+ * the pcs_get_state() member of the struct phylink_pcs_ops structure if
* accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
@@ -3483,7 +3553,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
* Read the MAC PCS state from the MII device configured in @config and
* parse the Clause 37 or Cisco SGMII link partner negotiation word into
* the phylink @state structure. This is suitable to be directly plugged
- * into the mac_pcs_get_state() member of the struct phylink_mac_ops
+ * into the pcs_get_state() member of the struct phylink_pcs_ops
* structure.
*/
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
@@ -3594,8 +3664,8 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config);
* clause 37 negotiation.
*
* Restart the clause 37 negotiation with the link partner. This is
- * suitable to be directly plugged into the mac_pcs_get_state() member
- * of the struct phylink_mac_ops structure.
+ * suitable to be directly plugged into the pcs_get_state() member
+ * of the struct phylink_pcs_ops structure.
*/
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs)
{
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 692930750215..c88edb19d2e7 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -20,6 +20,8 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/crc16.h>
+#include <linux/etherdevice.h>
#include <linux/smscphy.h>
/* Vendor-specific PHY Definitions */
@@ -51,6 +53,7 @@ struct smsc_phy_priv {
unsigned int edpd_enable:1;
unsigned int edpd_mode_set_by_user:1;
unsigned int edpd_max_wait_ms;
+ bool wol_arp;
};
static int smsc_phy_ack_interrupt(struct phy_device *phydev)
@@ -258,6 +261,243 @@ int lan87xx_read_status(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(lan87xx_read_status);
+static int lan874x_phy_config_init(struct phy_device *phydev)
+{
+ u16 val;
+ int rc;
+
+ /* Set up the LED2/nINT/nPME pin to function as nPME. A user option
+ * may be needed to select LED1/nINT/nPME instead.
+ */
+ val = MII_LAN874X_PHY_PME2_SET;
+
+ /* The bits MII_LAN874X_PHY_WOL_PFDA_FR, MII_LAN874X_PHY_WOL_WUFR,
+ * MII_LAN874X_PHY_WOL_MPR, and MII_LAN874X_PHY_WOL_BCAST_FR need to
+ * be cleared to de-assert the PME signal after a WoL event, but
+ * enabling PME auto-clear avoids that step.
+ */
+ val |= MII_LAN874X_PHY_PME_SELF_CLEAR;
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR,
+ val);
+ if (rc < 0)
+ return rc;
+
+ /* set nPME self clear delay time */
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_MCFGR,
+ MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY);
+ if (rc < 0)
+ return rc;
+
+ return smsc_phy_config_init(phydev);
+}
+
+static void lan874x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct smsc_phy_priv *priv = phydev->priv;
+ int rc;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_ARP | WAKE_MCAST);
+ wol->wolopts = 0;
+
+ rc = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR);
+ if (rc < 0)
+ return;
+
+ if (rc & MII_LAN874X_PHY_WOL_PFDAEN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (rc & MII_LAN874X_PHY_WOL_BCSTEN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (rc & MII_LAN874X_PHY_WOL_MPEN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (rc & MII_LAN874X_PHY_WOL_WUEN) {
+ if (priv->wol_arp)
+ wol->wolopts |= WAKE_ARP;
+ else
+ wol->wolopts |= WAKE_MCAST;
+ }
+}
+
+static u16 smsc_crc16(const u8 *buffer, size_t len)
+{
+ return bitrev16(crc16(0xFFFF, buffer, len));
+}
+
+static int lan874x_chk_wol_pattern(const u8 pattern[], const u16 *mask,
+ u8 len, u8 *data, u8 *datalen)
+{
+ size_t i, j, k;
+ int ret = 0;
+ u16 bits;
+
+ /* Pattern filtering can match up to 128 bytes of frame data. There
+ * are 8 registers to program the 16-bit masks, where each bit means
+ * the byte will be compared. The frame data will then go through a
+ * CRC16 calculation for hardware comparison. This helper function
+ * makes sure only relevant frame data are included in this
+ * calculation. It provides a warning when the masks and expected
+ * data size do not match.
+ */
+ i = 0;
+ k = 0;
+ while (len > 0) {
+ bits = *mask;
+ for (j = 0; j < 16; j++, i++, len--) {
+ /* No more pattern. */
+ if (!len) {
+ /* The rest of bitmap is not empty. */
+ if (bits)
+ ret = i + 1;
+ break;
+ }
+ if (bits & 1)
+ data[k++] = pattern[i];
+ bits >>= 1;
+ }
+ mask++;
+ }
+ *datalen = k;
+ return ret;
+}
+
+static int lan874x_set_wol_pattern(struct phy_device *phydev, u16 val,
+ const u8 data[], u8 datalen,
+ const u16 *mask, u8 masklen)
+{
+ u16 crc, reg;
+ int rc;
+
+ /* Starting pattern offset is set before calling this function. */
+ val |= MII_LAN874X_PHY_WOL_FILTER_EN;
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MII_LAN874X_PHY_MMD_WOL_WUF_CFGA, val);
+ if (rc < 0)
+ return rc;
+
+ crc = smsc_crc16(data, datalen);
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MII_LAN874X_PHY_MMD_WOL_WUF_CFGB, crc);
+ if (rc < 0)
+ return rc;
+
+ masklen = (masklen + 15) & ~0xf;
+ reg = MII_LAN874X_PHY_MMD_WOL_WUF_MASK7;
+ while (masklen >= 16) {
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS, reg, *mask);
+ if (rc < 0)
+ return rc;
+ reg--;
+ mask++;
+ masklen -= 16;
+ }
+
+ /* Clear out the rest of mask registers. */
+ while (reg != MII_LAN874X_PHY_MMD_WOL_WUF_MASK0) {
+ phy_write_mmd(phydev, MDIO_MMD_PCS, reg, 0);
+ reg--;
+ }
+ return rc;
+}
+
+static int lan874x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ struct smsc_phy_priv *priv = phydev->priv;
+ u16 val, val_wucsr;
+ u8 data[128];
+ u8 datalen;
+ int rc;
+
+ /* lan874x has only one WoL filter pattern */
+ if ((wol->wolopts & (WAKE_ARP | WAKE_MCAST)) ==
+ (WAKE_ARP | WAKE_MCAST)) {
+ phydev_info(phydev,
+ "lan874x WoL supports one of ARP|MCAST at a time\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR);
+ if (rc < 0)
+ return rc;
+
+ val_wucsr = rc;
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_wucsr |= MII_LAN874X_PHY_WOL_PFDAEN;
+ else
+ val_wucsr &= ~MII_LAN874X_PHY_WOL_PFDAEN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_wucsr |= MII_LAN874X_PHY_WOL_BCSTEN;
+ else
+ val_wucsr &= ~MII_LAN874X_PHY_WOL_BCSTEN;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ val_wucsr |= MII_LAN874X_PHY_WOL_MPEN;
+ else
+ val_wucsr &= ~MII_LAN874X_PHY_WOL_MPEN;
+
+ /* Need to use pattern matching */
+ if (wol->wolopts & (WAKE_ARP | WAKE_MCAST))
+ val_wucsr |= MII_LAN874X_PHY_WOL_WUEN;
+ else
+ val_wucsr &= ~MII_LAN874X_PHY_WOL_WUEN;
+
+ if (wol->wolopts & WAKE_ARP) {
+ const u8 pattern[2] = { 0x08, 0x06 };
+ const u16 mask[1] = { 0x0003 };
+
+ rc = lan874x_chk_wol_pattern(pattern, mask, 2, data,
+ &datalen);
+ if (rc)
+ phydev_dbg(phydev, "pattern not valid at %d\n", rc);
+
+ /* Need to match broadcast destination address and provided
+ * data pattern at offset 12.
+ */
+ val = 12 | MII_LAN874X_PHY_WOL_FILTER_BCSTEN;
+ rc = lan874x_set_wol_pattern(phydev, val, data, datalen, mask,
+ 2);
+ if (rc < 0)
+ return rc;
+ priv->wol_arp = true;
+ }
+
+ if (wol->wolopts & WAKE_MCAST) {
+ /* Need to match multicast destination address. */
+ val = MII_LAN874X_PHY_WOL_FILTER_MCASTTEN;
+ rc = lan874x_set_wol_pattern(phydev, val, data, 0, NULL, 0);
+ if (rc < 0)
+ return rc;
+ priv->wol_arp = false;
+ }
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_UCAST)) {
+ const u8 *mac = (const u8 *)ndev->dev_addr;
+ int i, reg;
+
+ reg = MII_LAN874X_PHY_MMD_WOL_RX_ADDRC;
+ for (i = 0; i < 6; i += 2, reg--) {
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS, reg,
+ ((mac[i + 1] << 8) | mac[i]));
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR,
+ val_wucsr);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
static int smsc_get_sset_count(struct phy_device *phydev)
{
return ARRAY_SIZE(smsc_hw_stats);
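
For reference, the value written to WUF_CFGB is the bit-reversed CRC16 over only the mask-selected pattern bytes; for the ARP filter configured above, that is the two EtherType bytes 0x08 0x06. A standalone user-space sketch, assuming it mirrors lib/crc16.c's reflected 0xA001 polynomial (illustrative, not driver code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_update(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
	}
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;

	for (int i = 0; i < 16; i++)
		r |= ((x >> i) & 1) << (15 - i);
	return r;
}

int main(void)
{
	/* Mask 0x0003 keeps pattern bytes 0 and 1: EtherType 0x0806. */
	const uint8_t data[] = { 0x08, 0x06 };

	printf("WUF_CFGB = 0x%04x\n",
	       bitrev16(crc16_update(0xFFFF, data, sizeof(data))));
	return 0;
}
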
@@ -533,7 +773,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.read_status = lan87xx_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan874x_phy_config_init,
.soft_reset = smsc_phy_reset,
/* IRQ related */
@@ -548,6 +788,10 @@ static struct phy_driver smsc_phy_driver[] = {
.get_tunable = smsc_phy_get_tunable,
.set_tunable = smsc_phy_set_tunable,
+ /* WoL */
+ .set_wol = lan874x_set_wol,
+ .get_wol = lan874x_get_wol,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -566,7 +810,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.read_status = lan87xx_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan874x_phy_config_init,
.soft_reset = smsc_phy_reset,
/* IRQ related */
@@ -581,6 +825,10 @@ static struct phy_driver smsc_phy_driver[] = {
.get_tunable = smsc_phy_get_tunable,
.set_tunable = smsc_phy_set_tunable,
+ /* WoL */
+ .set_wol = lan874x_set_wol,
+ .get_wol = lan874x_get_wol,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3b79c603b936..ba8b6bd8233c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -968,7 +968,7 @@ abort:
***********************************************************************/
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
- struct sock *sk = (struct sock *)chan->private;
+ struct sock *sk = chan->private;
return __pppoe_xmit(sk, skb);
}
@@ -976,7 +976,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path,
const struct ppp_channel *chan)
{
- struct sock *sk = (struct sock *)chan->private;
+ struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct net_device *dev = po->pppoe_dev;
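
A brief aside on why the casts can simply be dropped: in C, unlike C++, void * converts implicitly to any object pointer type, so the cast added noise without adding safety. A self-contained demonstration (not kernel code):

#include <assert.h>

struct sock { int refcnt; };

int main(void)
{
	struct sock s = { 1 };
	void *private = &s;
	struct sock *sk = private;			/* implicit, idiomatic C */
	struct sock *sk2 = (struct sock *)private;	/* redundant cast */

	assert(sk == sk2);
	return 0;
}
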
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 32183f24e63f..6833ef0c7930 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -129,10 +129,10 @@ static void del_chan(struct pppox_sock *sock)
spin_unlock(&chan_lock);
}
-static struct rtable *pptp_route_output(struct pppox_sock *po,
+static struct rtable *pptp_route_output(const struct pppox_sock *po,
struct flowi4 *fl4)
{
- struct sock *sk = &po->sk;
+ const struct sock *sk = &po->sk;
struct net *net;
net = sock_net(sk);
@@ -148,7 +148,7 @@ static struct rtable *pptp_route_output(struct pppox_sock *po,
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
- struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct net *net = sock_net(sk);
struct pptp_opt *opt = &po->proto.pptp;
@@ -575,7 +575,7 @@ out:
static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg)
{
- struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
void __user *argp = (void __user *)arg;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index c9a9373733c0..2bddcdf482a7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2720,6 +2720,45 @@ drop:
dev_kfree_skb(skb);
}
+static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
+ u32 nhid, __be32 vni)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst nh_rdst;
+ struct nexthop *nh;
+ bool do_xmit;
+ u32 hash;
+
+ memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
+ hash = skb_get_hash(skb);
+
+ rcu_read_lock();
+ nh = nexthop_find_by_id(dev_net(dev), nhid);
+ if (unlikely(!nh || !nexthop_is_fdb(nh) || !nexthop_is_multipath(nh))) {
+ rcu_read_unlock();
+ goto drop;
+ }
+ do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
+ rcu_read_unlock();
+
+ if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family)
+ goto drop;
+
+ if (likely(do_xmit))
+ vxlan_xmit_one(skb, dev, vni, &nh_rdst, false);
+ else
+ goto drop;
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
/* Transmit local packets over Vxlan
*
* Outer IP header inherits ECN and DF from inner header.
@@ -2735,6 +2774,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
+ u32 nhid = 0;
info = skb_tunnel_info(skb);
@@ -2744,6 +2784,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
info->mode & IP_TUNNEL_INFO_TX) {
vni = tunnel_id_to_key32(info->key.tun_id);
+ nhid = info->key.nhid;
} else {
if (info && info->mode & IP_TUNNEL_INFO_TX)
vxlan_xmit_one(skb, dev, vni, NULL, false);
@@ -2771,6 +2812,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
}
+ if (nhid)
+ return vxlan_xmit_nhid(skb, dev, nhid, vni);
+
if (vxlan->cfg.flags & VXLAN_F_MDB) {
struct vxlan_mdb_entry *mdb_entry;
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 7162bf38a8c9..cc70360364b7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
u32 phy_ao_base, phy_pd_base;
- if (md_ctrl->hif_id != CLDMA_ID_MD)
- return;
-
- phy_ao_base = CLDMA1_AO_BASE;
- phy_pd_base = CLDMA1_PD_BASE;
- hw_info->phy_interrupt_id = CLDMA1_INT;
hw_info->hw_mode = MODE_BIT_64;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD) {
+ phy_ao_base = CLDMA1_AO_BASE;
+ phy_pd_base = CLDMA1_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA1_INT;
+ } else {
+ phy_ao_base = CLDMA0_AO_BASE;
+ phy_pd_base = CLDMA0_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA0_INT;
+ }
+
hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 47a35e552da7..4410bac6993a 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -34,7 +34,7 @@
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
+ * @CLDMA_ID_AP: Application Processor control channel.
* @CLDMA_NUM: Number of CLDMA HW units available.
*/
enum cldma_id {
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h
index 209b386bc088..20c50dce9fc3 100644
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
@@ -25,6 +25,7 @@
D2H_INT_EXCEPTION_CLEARQ_DONE | \
D2H_INT_EXCEPTION_ALLQ_RESET | \
D2H_INT_PORT_ENUM | \
+ D2H_INT_ASYNC_AP_HK | \
D2H_INT_ASYNC_MD_HK)
void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 7d0f5e4f0a78..24e7d491468e 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -44,6 +44,7 @@
#include "t7xx_state_monitor.h"
#define RT_ID_MD_PORT_ENUM 0
+#define RT_ID_AP_PORT_ENUM 1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID 0x49434343
@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
}
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
if (stage == HIF_EX_INIT)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf
if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
return -EINVAL;
- if (i == RT_ID_MD_PORT_ENUM)
+ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
}
@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_modem *md)
return 0;
}
-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
+ struct t7xx_fsm_ctl *ctl,
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
struct t7xx_fsm_event *event = NULL, *event_next;
- struct t7xx_sys_info *core_info = &md->core_md;
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned long flags;
int ret;
@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_struct *work)
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+}
+
+static void t7xx_ap_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
+ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
+ md->core_ap.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
- void __iomem *mhccif_base;
unsigned int int_sta;
unsigned long flags;
switch (evt_id) {
case FSM_PRE_START:
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
+ D2H_INT_ASYNC_AP_HK);
break;
case FSM_START:
@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
ctl->exp_flg = true;
md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else if (ctl->exp_flg) {
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
- queue_work(md->handshake_wq, &md->handshake_work);
- md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else {
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+
+ if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+ queue_work(md->handshake_wq, &md->handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ }
+
+ if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
+ queue_work(md->handshake_wq, &md->ap_handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ }
}
spin_unlock_irqrestore(&md->exp_lock, flags);
@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
case FSM_READY:
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
break;
default:
@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
+ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
return md;
}
@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
md->exp_id = 0;
t7xx_fsm_reset(md);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
return t7xx_core_reset(md);
@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
if (ret)
goto err_destroy_hswq;
+ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
ret = t7xx_fsm_init(md);
if (ret)
goto err_destroy_hswq;
@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
if (ret)
goto err_uninit_ccmni;
- ret = t7xx_port_proxy_init(md);
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
if (ret)
goto err_uninit_md_cldma;
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_ap_cldma;
+
ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
- if (ret) /* fsm_uninit flushes cmd queue */
+ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
goto err_uninit_proxy;
t7xx_md_sys_sw_init(t7xx_dev);
@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
err_uninit_proxy:
t7xx_port_proxy_uninit(md->port_prox);
+err_uninit_ap_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
t7xx_ccmni_exit(t7xx_dev);
t7xx_fsm_uninit(md);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index 7469ed636ae8..abe633cf7adc 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -66,10 +66,12 @@ struct t7xx_modem {
struct cldma_ctrl *md_ctrl[CLDMA_NUM];
struct t7xx_pci_dev *t7xx_dev;
struct t7xx_sys_info core_md;
+ struct t7xx_sys_info core_ap;
bool md_init_finish;
bool rgu_irq_asserted;
struct workqueue_struct *handshake_wq;
struct work_struct handshake_work;
+ struct work_struct ap_handshake_work;
struct t7xx_fsm_ctl *fsm_ctl;
struct port_proxy *port_prox;
unsigned int exp_id;
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index 8ea9079af997..4ae8a00a8532 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -36,9 +36,13 @@
/* Channel ID and Message ID definitions.
* The channel number consists of peer_id(15:12) , channel_id(11:0)
* peer_id:
- * 0:reserved, 1: to sAP, 2: to MD
+ * 0:reserved, 1: to AP, 2: to MD
*/
enum port_ch {
+ /* to AP */
+ PORT_CH_AP_CONTROL_RX = 0x1000,
+ PORT_CH_AP_CONTROL_TX = 0x1001,
+
/* to MD */
PORT_CH_CONTROL_RX = 0x2000,
PORT_CH_CONTROL_TX = 0x2001,
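
A standalone sketch of the channel encoding described in the comment above, decoding the new AP control channel (illustrative user-space code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ch = 0x1000;	/* PORT_CH_AP_CONTROL_RX */

	/* peer_id in bits 15:12 (1 = AP), channel_id in bits 11:0. */
	printf("peer_id=%u channel_id=%u\n",
	       (unsigned int)((ch >> 12) & 0xf),
	       (unsigned int)(ch & 0xfff));
	return 0;
}
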
diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
index 68430b130a67..ae632ef96698 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
case CTL_ID_HS2_MSG:
skb_pull(skb, sizeof(*ctrl_msg_h));
- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
+ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
+ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
+ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
+
+ ret = t7xx_fsm_append_event(ctl, event, skb->data,
le32_to_cpu(ctrl_msg_h->data_length));
if (ret)
dev_err(port->dev, "Failed to append Handshake 2 event");
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 894b1d11b2c9..274846d39fbf 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,7 +48,7 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
-static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
.rx_ch = PORT_CH_UART2_RX,
@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = {
.path_id = CLDMA_ID_MD,
.ops = &ctl_port_ops,
.name = "t7xx_ctrl",
+ }, {
+ .tx_ch = PORT_CH_AP_CONTROL_TX,
+ .rx_ch = PORT_CH_AP_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_AP,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ap_ctrl",
},
};
@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
md->core_md.ctl_port = port;
+ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
+ md->core_ap.ctl_port = port;
+
port->t7xx_dev = md->t7xx_dev;
port->dev = &md->t7xx_dev->pdev->dev;
spin_lock_init(&port->port_update_lock);
@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
+ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
int i;
@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md)
port_prox->dev = dev;
for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
+ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
port_prox->port_count = port_count;
t7xx_proxy_init_all_ports(md);
@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
if (ret)
return ret;
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
index 7c1b81091a0f..c41d7d094c08 100644
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -56,7 +56,7 @@
#define D2H_INT_RESUME_ACK BIT(12)
#define D2H_INT_SUSPEND_ACK_AP BIT(13)
#define D2H_INT_RESUME_ACK_AP BIT(14)
-#define D2H_INT_ASYNC_SAP_HK BIT(15)
+#define D2H_INT_ASYNC_AP_HK BIT(15)
#define D2H_INT_ASYNC_MD_HK BIT(16)
/* Register base */
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 0bcca08ff2bd..80edb8e75a6a 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
t7xx_md_event_notify(md, FSM_START);
- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
- HZ * 60);
+ wait_event_interruptible_timeout(ctl->async_hk_wq,
+ (md->core_md.ready && md->core_ap.ready) ||
+ ctl->exp_flg, HZ * 60);
dev = &md->t7xx_dev->pdev->dev;
if (ctl->exp_flg)
@@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
+ } else if (!md->core_ap.ready) {
+ dev_err(dev, "AP handshake timeout\n");
+ if (md->core_ap.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
}
t7xx_pci_pm_init_late(md->t7xx_dev);
@@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
return;
}
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b1af0259d4c5..b6e76f3903c8 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -38,10 +38,12 @@ enum t7xx_fsm_state {
enum t7xx_fsm_event_state {
FSM_EVENT_INVALID,
FSM_EVENT_MD_HS2,
+ FSM_EVENT_AP_HS2,
FSM_EVENT_MD_EX,
FSM_EVENT_MD_EX_REC_OK,
FSM_EVENT_MD_EX_PASS,
FSM_EVENT_MD_HS2_EXIT,
+ FSM_EVENT_AP_HS2_EXIT,
FSM_EVENT_MAX
};