-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml22
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst3
-rw-r--r--Documentation/networking/device_drivers/ethernet/meta/fbnic.rst29
-rw-r--r--Documentation/networking/mptcp-sysctl.rst11
-rw-r--r--Documentation/networking/timestamping.rst20
-rw-r--r--MAINTAINERS1
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/cc770/cc770_platform.c32
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c2
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/Kconfig4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c46
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.h1
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c506
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.h46
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c329
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h954
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c2604
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c997
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c1300
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h361
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c480
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c2146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h834
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c1216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h514
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c780
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c1209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h270
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c493
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h13
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c75
-rw-r--r--drivers/net/ethernet/microchip/Kconfig5
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c123
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c646
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c409
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h58
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c12
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h23
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c159
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c420
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c4
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c6
-rw-r--r--drivers/net/ethernet/sfc/ef10.c127
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c5
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h1
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c273
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c153
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c17
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c21
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c22
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c4
-rw-r--r--drivers/net/mdio/fwnode_mdio.c3
-rw-r--r--drivers/net/phy/microchip_t1.c413
-rw-r--r--drivers/net/phy/phylink.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c7
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c62
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c508
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c107
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h118
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c180
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c49
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c138
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c211
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c188
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c260
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c124
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h59
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c2
-rw-r--r--drivers/ptp/ptp_ines.c4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h189
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/phylink.h2
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/stmmac.h28
-rw-r--r--include/net/cfg80211.h25
-rw-r--r--include/net/mac80211.h5
-rw-r--r--include/net/mptcp.h4
-rw-r--r--include/net/xfrm.h45
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--net/core/dev.c7
-rw-r--r--net/core/skbuff.c35
-rw-r--r--net/dsa/tag_ksz.c5
-rw-r--r--net/ethtool/common.c1
-rw-r--r--net/ethtool/phy.c2
-rw-r--r--net/hsr/hsr_device.c1
-rw-r--r--net/hsr/hsr_main.h1
-rw-r--r--net/hsr/hsr_slave.c11
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/mac80211/airtime.c140
-rw-r--r--net/mac80211/cfg.c49
-rw-r--r--net/mac80211/chan.c1
-rw-r--r--net/mac80211/ieee80211_i.h8
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/link.c12
-rw-r--r--net/mac80211/mlme.c13
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/scan.c6
-rw-r--r--net/mac80211/util.c80
-rw-r--r--net/mptcp/ctrl.c133
-rw-r--r--net/mptcp/mib.c3
-rw-r--r--net/mptcp/mib.h3
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/mptcp/protocol.h16
-rw-r--r--net/mptcp/subflow.c4
-rw-r--r--net/sched/sch_cake.c53
-rw-r--r--net/smc/smc_pnet.c3
-rw-r--r--net/smc/smc_sysctl.c11
-rw-r--r--net/socket.c10
-rw-r--r--net/unix/af_unix.c61
-rw-r--r--net/wireless/core.h8
-rw-r--r--net/wireless/ibss.c2
-rw-r--r--net/wireless/mesh.c2
-rw-r--r--net/wireless/mlme.c20
-rw-r--r--net/wireless/nl80211.c62
-rw-r--r--net/wireless/rdev-ops.h13
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/scan.c45
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/trace.h40
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_policy.c222
-rw-r--r--tools/testing/selftests/Makefile5
-rw-r--r--tools/testing/selftests/kselftest/runner.sh7
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/msg_oob.c23
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh17
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh3
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh17
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh1
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh1
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh1
-rw-r--r--tools/testing/selftests/net/packetdrill/Makefile9
-rw-r--r--tools/testing/selftests/net/packetdrill/config5
-rwxr-xr-xtools/testing/selftests/net/packetdrill/defaults.sh63
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh41
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt51
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt51
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt28
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c18
-rw-r--r--tools/testing/selftests/net/txtimestamp.c6
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy_add_speed.sh83
352 files changed, 25017 insertions, 3345 deletions
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index ee7a65b528cd..d1e2bca3c503 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -58,18 +58,18 @@ allOf:
- const: timing-adjustment
amlogic,tx-delay-ns:
- $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 2, 4, 6]
+ default: 2
description:
- The internal RGMII TX clock delay (provided by this driver) in
- nanoseconds. Allowed values are 0ns, 2ns, 4ns, 6ns.
- When phy-mode is set to "rgmii" then the TX delay should be
- explicitly configured. When not configured a fallback of 2ns is
- used. When the phy-mode is set to either "rgmii-id" or "rgmii-txid"
- the TX clock delay is already provided by the PHY. In that case
- this property should be set to 0ns (which disables the TX clock
- delay in the MAC to prevent the clock from going off because both
- PHY and MAC are adding a delay).
- Any configuration is ignored when the phy-mode is set to "rmii".
+ The internal RGMII TX clock delay (provided by this driver)
+ in nanoseconds. When phy-mode is set to "rgmii" then the TX
+ delay should be explicitly configured. When the phy-mode is
+ set to either "rgmii-id" or "rgmii-txid" the TX clock delay
+ is already provided by the PHY. In that case this property
+ should be set to 0ns (which disables the TX clock delay in
+ the MAC to prevent the clock from going off because both
+ PHY and MAC are adding a delay). Any configuration is
+ ignored when the phy-mode is set to "rmii".
amlogic,rx-delay-ns:
deprecated: true
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 6932d8c043c2..6fc1961492b7 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -44,6 +44,7 @@ Contents:
marvell/octeon_ep
marvell/octeon_ep_vf
mellanox/mlx5/index
+ meta/fbnic
microsoft/netvsc
neterion/s2io
netronome/nfp
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
index 20d3b7e87049..34e911480108 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
@@ -130,6 +130,9 @@ Enabling the driver and kconfig options
| Build support for software-managed steering in the NIC.
+**CONFIG_MLX5_HW_STEERING=(y/n)**
+
+| Build support for hardware-managed steering in the NIC.
**CONFIG_MLX5_TC_CT=(y/n)**
diff --git a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
new file mode 100644
index 000000000000..32ff114f5c26
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
@@ -0,0 +1,29 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=====================================
+Meta Platforms Host Network Interface
+=====================================
+
+Firmware Versions
+-----------------
+
+fbnic has three components stored on the flash which are provided in one PLDM
+image:
+
+1. fw - The control firmware used to view and modify firmware settings, request
+ firmware actions, and retrieve firmware counters outside of the data path.
+ This is the firmware which fbnic_fw.c interacts with.
+2. bootloader - The firmware which validates firmware security and controls
+   basic operations, including loading and updating the firmware. This is also
+   known as the cmrt firmware.
+3. undi - This is the UEFI driver which is based on the Linux driver.
+
+fbnic stores two copies of these three components on flash. This allows fbnic
+to fall back to an older version of firmware automatically in case firmware
+fails to boot. Version information for both copies is reported as running and
+stored. The undi version is only reported as stored, since it is not actively
+running once the Linux driver takes over.
+
+devlink dev info provides version information for all three components. In
+addition to the version, the hg commit hash of the build is included as a
+separate entry.
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index fd514bba8c43..95598c21fc8e 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -34,6 +34,17 @@ available_schedulers - STRING
Shows the available schedulers choices that are registered. More packet
schedulers may be available, but not loaded.
+blackhole_timeout - INTEGER (seconds)
+ Initial time period in seconds to disable MPTCP on active MPTCP sockets
+ when an MPTCP firewall blackhole issue happens. This time period grows
+ exponentially when more blackhole issues are detected right after
+ MPTCP is re-enabled, and resets to the initial value when the
+ blackhole issue goes away.
+
+ 0 to disable the blackhole detection.
+
+ Default: 3600
+
checksum_enabled - BOOLEAN
Control whether DSS checksum can be enabled.
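
A minimal userspace sketch of exercising this knob (hedged: only the sysctl
path documented above is assumed; the write requires privilege)::

    /* Read the current MPTCP blackhole timeout, then disable detection
     * by writing 0, per the documentation above.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/mptcp/blackhole_timeout", "r+");
            unsigned int timeout;

            if (!f)
                    return 1;
            if (fscanf(f, "%u", &timeout) == 1)
                    printf("blackhole_timeout: %u seconds\n", timeout);
            rewind(f);
            fputs("0\n", f);        /* 0 disables blackhole detection */
            fclose(f);
            return 0;
    }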
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 5e93cd71f99f..8199e6917671 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -158,7 +158,8 @@ SOF_TIMESTAMPING_SYS_HARDWARE:
SOF_TIMESTAMPING_RAW_HARDWARE:
Report hardware timestamps as generated by
- SOF_TIMESTAMPING_TX_HARDWARE when available.
+ SOF_TIMESTAMPING_TX_HARDWARE or SOF_TIMESTAMPING_RX_HARDWARE
+ when available.
1.3.3 Timestamp Options
@@ -266,6 +267,23 @@ SOF_TIMESTAMPING_OPT_TX_SWHW:
two separate messages will be looped to the socket's error queue,
each containing just one timestamp.
+SOF_TIMESTAMPING_OPT_RX_FILTER:
+ Filter out spurious receive timestamps: report a receive timestamp
+ only if the matching timestamp generation flag is enabled.
+
+ Receive timestamps are generated early in the ingress path, before a
+ packet's destination socket is known. If any socket enables receive
+ timestamps, packets for all sockets will be timestamped, including
+ sockets that request timestamp reporting with SOF_TIMESTAMPING_SOFTWARE
+ and/or SOF_TIMESTAMPING_RAW_HARDWARE but do not request receive
+ timestamp generation. This can happen when requesting transmit
+ timestamps only.
+
+ Receiving spurious timestamps is generally benign: a process can
+ ignore the unexpected non-zero value. But it makes behavior subtly
+ dependent on other sockets. This flag isolates the socket for more
+ deterministic behavior.
+
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
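
A minimal sketch of the new filter flag in use (assumes uapi headers that
already carry SOF_TIMESTAMPING_OPT_RX_FILTER; error handling trimmed)::

    /* Request hardware RX timestamp generation and reporting, and filter
     * out reports for timestamps this socket did not ask for.
     */
    #include <linux/net_tstamp.h>
    #include <sys/socket.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int val = SOF_TIMESTAMPING_RX_HARDWARE |
                      SOF_TIMESTAMPING_RAW_HARDWARE |
                      SOF_TIMESTAMPING_OPT_RX_FILTER;

            if (fd < 0)
                    return 1;
            if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                           &val, sizeof(val)) < 0)
                    perror("SO_TIMESTAMPING");
            return 0;
    }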
diff --git a/MAINTAINERS b/MAINTAINERS
index ca1469d52076..4053168fdc12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14833,6 +14833,7 @@ M: Alexander Duyck <alexanderduyck@fb.com>
M: Jakub Kicinski <kuba@kernel.org>
R: kernel-team@meta.com
S: Supported
+F: Documentation/networking/device_drivers/ethernet/meta/
F: drivers/net/ethernet/meta/
METHODE UDPU SUPPORT
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 47ab4ccd6fc1..b560644ee1b1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5886,9 +5886,6 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
- info->phc_index = -1;
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 13bcfba05f18..f2424fe58612 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
static int cc770_get_of_node_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
+ u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0;
struct device_node *np = pdev->dev.of_node;
- const u32 *prop;
- int prop_size;
- u32 clkext;
-
- prop = of_get_property(np, "bosch,external-clock-frequency",
- &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- clkext = *prop;
- else
- clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+
+ of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
priv->can.clock.freq = clkext;
/* The system clock may not exceed 10 MHz */
@@ -98,7 +91,7 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
- if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ if (!of_property_read_bool(np, "bosch,no-comperator-bypass"))
priv->bus_config |= BUSCFG_CBY;
if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
@@ -109,25 +102,22 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
- prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
- u32 cdv = clkext / *prop;
- int slew;
+ of_property_read_u32(np, "bosch,clock-out-frequency", &clkout);
+ if (clkout > 0) {
+ u32 cdv = clkext / clkout;
if (cdv > 0 && cdv < 16) {
+ u32 slew;
+
priv->cpu_interface |= CPUIF_CEN;
priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
- prop = of_get_property(np, "bosch,slew-rate",
- &prop_size);
- if (prop && (prop_size == sizeof(u32))) {
- slew = *prop;
- } else {
+ if (of_property_read_u32(np, "bosch,slew-rate", &slew)) {
/* Determine default slew rate */
slew = (CLKOUT_SL_MASK >>
CLKOUT_SL_SHIFT) -
((cdv * clkext - 1) / 8000000);
- if (slew < 0)
+ if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT))
slew = 0;
}
priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
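
The conversion above leans on a guarantee of of_property_read_u32(): on any
failure it leaves the output variable untouched, so pre-initializing the
variable (as done with clkext and clkout) provides the default for free. A
standalone sketch of the idiom (hypothetical property name, not from this
driver)::

    #include <linux/of.h>

    /* of_property_read_u32() does not write to "val" when the property is
     * absent or malformed, so the initializer doubles as the fallback.
     */
    static u32 example_clock_freq(const struct device_node *np)
    {
            u32 val = 16000000;     /* default when property is missing */

            of_property_read_u32(np, "vendor,clock-frequency", &val);
            return val;
    }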
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
index 81cccc5fd838..fb1a8f4e6217 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -71,7 +71,7 @@ void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
max_cycles = div_u64(ULLONG_MAX, cc->mult);
max_cycles = min(max_cycles, cc->mask);
- work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift) / 3;
+ work_delay_ns = div_u64(clocksource_cyc2ns(max_cycles, cc->mult, cc->shift), 3);
priv->work_delay_jiffies = nsecs_to_jiffies(work_delay_ns);
INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index f954f38b955f..865a15e033a9 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -63,7 +63,7 @@ void rkcanfd_xmit_retry(struct rkcanfd_priv *priv)
rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
}
-int rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct rkcanfd_priv *priv = netdev_priv(ndev);
u32 reg_frameinfo, reg_id, reg_cmd;
diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h
index 3efd7f174e14..93131c7d7f54 100644
--- a/drivers/net/can/rockchip/rockchip_canfd.h
+++ b/drivers/net/can/rockchip/rockchip_canfd.h
@@ -546,7 +546,7 @@ void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv);
unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv);
void rkcanfd_xmit_retry(struct rkcanfd_priv *priv);
-int rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
unsigned int *frame_len_p);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 21407a26f806..5fc94c2f638e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -582,16 +582,12 @@ static int xgbe_get_ts_info(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pdata->ptp_clock)
ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on (PCI || ATH79)
+ depends on PCI || ATH79 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
- depends on ATH79
+ depends on ATH79 || COMPILE_TEST
select PHYLINK
imply NET_SELFTESTS
help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index db2a8ade6205..96a6189cc31e 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -149,11 +149,11 @@
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
-#define FIFO_CFG4_LE BIT(11) /* Long Event */
-#define FIFO_CFG4_CF BIT(12) /* Control Frame */
-#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
-#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
-#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_CF BIT(11) /* Control Frame */
+#define FIFO_CFG4_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG4_LE BIT(15) /* Long Event */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -168,28 +168,28 @@
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
-#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
-#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
-#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
-#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
-#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
-#define FIFO_CFG5_DR BIT(9) /* Dribble */
-#define FIFO_CFG5_CF BIT(10) /* Control Frame */
-#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
-#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
-#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
-#define FIFO_CFG5_LE BIT(14) /* Long Event */
-#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
-#define FIFO_CFG5_16 BIT(16) /* unknown */
-#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_CR BIT(4) /* CRC error */
+#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(10) /* Dribble */
+#define FIFO_CFG5_CF BIT(11) /* Control Frame */
+#define FIFO_CFG5_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(15) /* Long Event */
+#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
+ FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+ FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+ FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+ FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+ FIFO_CFG5_UC | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */
@@ -379,7 +379,6 @@ struct ag71xx {
u32 fifodata[3];
int mac_idx;
- struct reset_control *mdio_reset;
struct clk *clk_mdio;
};
@@ -509,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ag71xx_statistics[i].name);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -690,6 +688,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
{
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
+ struct reset_control *mdio_reset;
static struct mii_bus *mii_bus;
struct device_node *np, *mnp;
int err;
@@ -706,10 +705,10 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
if (!mii_bus)
return -ENOMEM;
- ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
- if (IS_ERR(ag->mdio_reset)) {
+ mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+ if (IS_ERR(mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- return PTR_ERR(ag->mdio_reset);
+ return PTR_ERR(mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -720,12 +719,10 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
mii_bus->parent = dev;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
- if (!IS_ERR(ag->mdio_reset)) {
- reset_control_assert(ag->mdio_reset);
- msleep(100);
- reset_control_deassert(ag->mdio_reset);
- msleep(200);
- }
+ reset_control_assert(mdio_reset);
+ msleep(100);
+ reset_control_deassert(mdio_reset);
+ msleep(200);
mnp = of_get_child_by_name(np, "mdio");
err = devm_of_mdiobus_register(dev, mii_bus, mnp);
@@ -1853,6 +1850,12 @@ static int ag71xx_probe(struct platform_device *pdev)
if (!ag->mac_base)
return -ENOMEM;
+ /* ensure that HW is in manual polling mode before interrupts are
+ * activated. Otherwise ag71xx_interrupt might call napi_schedule
+ * before NAPI is initialized by netif_napi_add.
+ */
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
+
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
0x0, dev_name(&pdev->dev), ndev);
@@ -2033,4 +2036,5 @@ static struct platform_driver ag71xx_driver = {
};
module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c9248ed9330c..6e422e24750a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -13803,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13835,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hwr.cp_p5 = hwr.tx + rx;
- return bnxt_hwrm_check_rings(bp, &hwr);
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
+ }
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3b805ed433ed..69231e85140b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1217,12 +1217,15 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
};
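
For context, a hedged sketch of the worst case this accounts for, assuming
the driver's "%s-%s-%d" IRQ naming (ifname, "TxRx"/"rx"/"tx", ring index):
"TxRx" contributes 4 characters, the 2 hyphens add 2, and a maximal int adds
10 digits, so 17 covers the suffix plus the terminating NUL::

    #include <net/if.h>
    #include <stdio.h>

    int main(void)
    {
            /* 15-char ifname + "-TxRx-" + 10 digits of INT_MAX + NUL
             * still fits in IFNAMSIZ + 17 bytes.
             */
            char name[IFNAMSIZ + 17];
            int n = snprintf(name, sizeof(name), "%s-%s-%d",
                             "enp99s0f0np0xyz", "TxRx", 2147483647);

            printf("%s -> %d chars, buffer %zu\n", name, n, sizeof(name));
            return 0;
    }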
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7392a716f28d..f71cc8188b4e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -5043,11 +5044,8 @@ static int bnxt_get_ts_info(struct net_device *dev,
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e7b15d..fdd6356f21ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, up to the default.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6ec0abf..4f4914f5c84c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0ec5f01551f9..378815917741 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6145,9 +6145,7 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6157,8 +6155,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8e1e4b2b2386..f06babec04a0 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3410,8 +3410,6 @@ static int gem_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3423,7 +3421,8 @@ static int gem_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 5835965dbc32..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2496,37 +2496,31 @@ ret_intrmod:
return ret;
}
+#ifdef PTP_HARDWARE_TIMESTAMPING
static int lio_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
- else
- info->phc_index = -1;
-#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
return 0;
}
+#endif
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
@@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 6a04d2530176..d0ff0c170b1a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -844,8 +844,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f2f1055880b2..31685ee304c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -601,9 +601,7 @@ static int enic_set_rxfh(struct net_device *netdev,
static int enic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2baef59f741d..ecb1703ea150 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_grp_init;
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 7f081e6e8c87..ba83dbf4ed22 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1042,12 +1042,9 @@ static int fun_set_rxfh(struct net_device *netdev,
static int fun_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a19d098f2e2b..6ace55837172 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -418,8 +418,8 @@ do_retry:
static void emac_hash_mc(struct emac_instance *dev)
{
+ u32 __iomem *gaht_base = emac_gaht_base(dev);
const int regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..d8664bd65e1f 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -400,7 +400,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int offset;
@@ -413,10 +413,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
- return (u32 *)((ptrdiff_t)p + offset);
+ return (u32 __iomem *)((__force ptrdiff_t)p + offset);
}
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
{
/* GAHT registers always come after an identical number of
* IAHT registers.
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index b4f6fa4ba13d..3307d551f431 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -33,6 +33,8 @@ ice-y := ice_main.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \
+ ice_sf_eth.o \
+ ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 810a901d7afd..415445cefdb2 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,9 +6,11 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
+#include "devlink_port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
+#include "ice_sf_eth.h"
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_sched_node *tc_node, struct ice_pf *pf)
{
struct devlink_rate *rate_node = NULL;
+ struct ice_dynamic_port *sf;
struct ice_vf *vf;
int i;
@@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
} else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
pf->vsi[node->vsi_handle]->vf) {
vf = pf->vsi[node->vsi_handle]->vf;
if (!vf->devlink_port.devlink_rate)
@@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
*/
devl_rate_leaf_create(&vf->devlink_port, node,
node->parent->rate_node);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+ pf->vsi[node->vsi_handle]->sf) {
+ sf = pf->vsi[node->vsi_handle]->sf;
+ if (!sf->devlink_port.devlink_rate)
+ /* leaf nodes don't have children
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&sf->devlink_port, node,
+ node->parent->rate_node);
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
node->parent->rate_node) {
rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = {
.rate_leaf_parent_set = ice_devlink_set_parent,
.rate_node_parent_set = ice_devlink_set_parent,
+
+ .port_new = ice_devlink_port_new,
};
+static const struct devlink_ops ice_sf_devlink_ops;
+
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
@@ -1562,6 +1580,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
}
/**
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
+ *
+ * Allocate a devlink instance for SF.
+ *
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
+ */
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+ dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
+ return devlink_priv(devlink);
+}
+
+/**
* ice_devlink_register - Register devlink interface for this PF
* @pf: the PF to register the devlink for.
*
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 62ef8e2fb5f1..928c8bdb6649 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -5,6 +5,9 @@
#include "ice.h"
#include "devlink.h"
+#include "devlink_port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
static int ice_active_port_option = -1;
@@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devl_rate_leaf_destroy(&vf->devlink_port);
devl_port_unregister(&vf->devlink_port);
}
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ struct devlink_port_attrs attrs = {};
+ struct ice_dynamic_port *dyn_port;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+
+ dyn_port = sf_dev->dyn_port;
+ vsi = dyn_port->vsi;
+
+ devlink_port = &sf_dev->priv->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(sf_dev->priv);
+
+ return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to destroy the devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (dyn_port->active)
+ return 0;
+
+ err = ice_sf_eth_activate(dyn_port, extack);
+ if (err)
+ return err;
+
+ dyn_port->active = true;
+
+ return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ if (!dyn_port->active)
+ return;
+
+ ice_sf_eth_deactivate(dyn_port);
+ dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it is currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port *devlink_port = &dyn_port->devlink_port;
+ struct ice_pf *pf = dyn_port->pf;
+
+ ice_deactivate_dynamic_port(dyn_port);
+
+ xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+ ice_eswitch_detach_sf(pf, dyn_port);
+ xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+ ice_vsi_free(dyn_port->vsi);
+ kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+ struct ice_dynamic_port *dyn_port;
+ unsigned long index;
+
+ xa_for_each(&pf->dyn_ports, index, dyn_port)
+ ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->controller_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->pfnum != pf->hw.pf_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+ return -EINVAL;
+ }
+
+ if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dynamic MSIX-X interrupt allocation is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes the devlink port and deallocates all resources associated
+ * with the created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+ ice_dealloc_dynamic_port(dyn_port);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Verify the arguments and copy the new MAC address into the
+ * subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->attached) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ethernet address can be change only in detached state");
+ return -EBUSY;
+ }
+
+ if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: buffer to receive the hw address
+ * @hw_addr_len: where to store the hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Copies the port MAC address into @hw_addr.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ ether_addr_copy(hw_addr, dyn_port->hw_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return ice_activate_dynamic_port(dyn_port, extack);
+
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ ice_deactivate_dynamic_port(dyn_port);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->active)
+ *state = DEVLINK_PORT_FN_STATE_ACTIVE;
+ else
+ *state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ if (dyn_port->attached)
+ *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ else
+ *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+ return 0;
+}
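Taken together, the two bits map onto devlink's function-state model: active is the administrative state requested through the state_set handler above, while attached is the operational state, set only once the subfunction auxiliary device has been probed (at the end of ice_sf_dev_probe()) and cleared again in ice_sf_dev_remove().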
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+ .port_del = ice_devlink_port_del,
+ .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+ .port_fn_state_get = ice_devlink_port_fn_state_get,
+ .port_fn_state_set = ice_devlink_port_fn_state_set,
+};
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack, u32 *sfnum)
+{
+ int err;
+
+ /* If user didn't request an explicit number, pick one */
+ if (!new_attr->sfnum_valid)
+ return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+ GFP_KERNEL);
+
+ /* Otherwise, check and use the number provided */
+ err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+ return err;
+ }
+
+ *sfnum = new_attr->sfnum;
+
+ return 0;
+}
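The reservation relies on two xarray primitives with usefully different semantics; a self-contained sketch of the pattern (the example_ names are hypothetical):

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(example_nums);

    /* auto-assign: xa_alloc() picks a free index and returns it via @id */
    static int example_reserve_any(u32 *id)
    {
            return xa_alloc(&example_nums, id, NULL, xa_limit_32b,
                            GFP_KERNEL);
    }

    /* user-chosen: inserting a NULL entry reserves the index outright;
     * an already-occupied index fails with -EBUSY, the duplicate-sfnum
     * case reported through extack above
     */
    static int example_reserve_exact(u32 num)
    {
            return xa_insert(&example_nums, num, NULL, GFP_KERNEL);
    }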
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = dyn_port->vsi;
+ pf = dyn_port->pf;
+
+ devlink_port = &dyn_port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+ attrs.pci_sf.pf = pf->hw.pf_id;
+ attrs.pci_sf.sf = dyn_port->sfnum;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ devl_rate_leaf_destroy(&dyn_port->devlink_port);
+ devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: index of newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_dynamic_port *dyn_port;
+ struct ice_vsi *vsi;
+ u32 sfnum;
+ int err;
+
+ err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+ if (err)
+ return err;
+
+ dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+ if (!dyn_port) {
+ err = -ENOMEM;
+ goto unroll_reserve_sf_num;
+ }
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+ err = -ENOMEM;
+ goto unroll_dyn_port_alloc;
+ }
+
+ dyn_port->vsi = vsi;
+ dyn_port->pf = pf;
+ dyn_port->sfnum = sfnum;
+ eth_random_addr(dyn_port->hw_addr);
+
+ err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+ goto unroll_vsi_alloc;
+ }
+
+ err = ice_eswitch_attach_sf(pf, dyn_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+ goto unroll_xa_insert;
+ }
+
+ *devlink_port = &dyn_port->devlink_port;
+
+ return 0;
+
+unroll_xa_insert:
+ xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+ ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+ kfree(dyn_port);
+unroll_reserve_sf_num:
+ xa_erase(&pf->sf_nums, sfnum);
+
+ return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Create a new devlink port: check the new port attributes, reject
+ * any unsupported parameters and allocate a subfunction for the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+ if (err)
+ return err;
+
+ return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
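With .port_new wired into ice_devlink_ops (first hunk of this patch), the whole subfunction lifecycle can be driven from userspace with the iproute2 devlink tool, roughly (PCI address hypothetical; exact syntax depends on the devlink utility version): "devlink port add pci/0000:31:00.0 flavour pcisf pfnum 0 sfnum 4" creates the port, "devlink port function set pci/0000:31:00.0/<index> hw_addr 00:11:22:33:44:55" sets the MAC while the port is still detached, "devlink port function set pci/0000:31:00.0/<index> state active" spawns the ice.sf auxiliary device, and "devlink port del pci/0000:31:00.0/<index>" tears everything down.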
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
index 9223bcdb6444..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -4,9 +4,55 @@
#ifndef _DEVLINK_PORT_H_
#define _DEVLINK_PORT_H_
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true if the port is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port. Each port flavour
+ * keeps its implementation-specific state in the union below.
+ */
+struct ice_dynamic_port {
+ u8 hw_addr[ETH_ALEN];
+ u8 active: 1;
+ u8 attached: 1;
+ struct devlink_port devlink_port;
+ struct ice_pf *pf;
+ struct ice_vsi *vsi;
+ unsigned long repr_id;
+ u32 sfnum;
+ /* Flavour-specific implementation data */
+ union {
+ struct ice_sf_dev *sf_dev;
+ };
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+ container_of(port, struct ice_dynamic_port, devlink_port)
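ice_devlink_port_to_dyn() is the usual container_of() idiom: every devlink_port handed to the port ops is embedded in a struct ice_dynamic_port, so the driver state is recovered without any lookup. A hypothetical op as illustration (sketch only):

    static int example_port_fn(struct devlink_port *port)
    {
            struct ice_dynamic_port *dyn_port = ice_devlink_port_to_dyn(port);

            return dyn_port->active ? 0 : -ENODEV;
    }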
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ce8b5505b16d..d6f80da30dec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -451,7 +451,12 @@ struct ice_vsi {
struct_group_tagged(ice_vsi_cfg_params, params,
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_channel *ch; /* VSI's channel structure, may be NULL */
- struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ union {
+ /* VF associated with this VSI, may be NULL */
+ struct ice_vf *vf;
+ /* SF associated with this VSI, may be NULL */
+ struct ice_dynamic_port *sf;
+ };
u32 flags; /* VSI flags used for rebuild and configuration */
enum ice_vsi_type type; /* the type of the VSI */
);
@@ -652,6 +657,9 @@ struct ice_pf {
struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
+ struct xarray dyn_ports;
+ struct xarray sf_nums;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -918,6 +926,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -1003,6 +1012,14 @@ void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c158749a80e0..4a9a6899fc45 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -325,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
@@ -540,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 3cfa071e3718..c0b3e70a7ea3 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
ice_eswitch_start_all_tx_queues(pf);
}
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_repr *repr;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
- devl_lock(devlink);
- repr = ice_repr_add_vf(vf);
- devl_unlock(devlink);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
+ err = repr->ops.add(repr);
+ if (err)
goto err_create_repr;
- }
err = ice_eswitch_setup_repr(pf, repr);
if (err)
@@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_xa_alloc;
- vf->repr_id = repr->id;
+ *id = repr->id;
ice_eswitch_start_reprs(pf);
@@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
- devl_unlock(devlink);
+ repr->ops.rem(repr);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -506,14 +498,59 @@ err_create_repr:
return err;
}
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * A port representor for the VF is created during attach.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ int err;
- if (!repr)
- return;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ devl_lock(devlink);
+ err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+ devl_unlock(devlink);
+
+ return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * A port representor for the SF is created during attach. Unlike the VF
+ * variant, this runs with the devlink instance lock already held.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create_sf(sf);
+ int err;
+
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+
+ return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
ice_eswitch_stop_reprs(pf);
xa_erase(&pf->eswitch.reprs, repr->id);
@@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
+ repr->ops.rem(repr);
+ ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
+ struct devlink *devlink = priv_to_devlink(pf);
+
/* since all port representors are destroyed, there is
* no point in keeping the nodes
*/
@@ -533,10 +572,42 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
} else {
ice_eswitch_start_reprs(pf);
}
+}
+
+/**
+ * ice_eswitch_detach_vf - detach VF from the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
+ */
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
+ return;
+
+ devl_lock(devlink);
+ ice_eswitch_detach(pf, repr);
devl_unlock(devlink);
}
/**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
+
+ if (!repr)
+ return;
+
+ ice_eswitch_detach(pf, repr);
+}
+
+/**
* ice_eswitch_get_target - get netdev based on src_vsi from descriptor
* @rx_ring: ring used to receive the packet
* @rx_desc: descriptor used to get src_vsi value
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 78fd39a6935d..20ce32dda69c 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,11 +5,13 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
+#include "devlink/devlink_port.h"
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
+
+static inline int
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 793a64d44aa3..d5cc934d1359 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4412,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4424,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct ice_repr *repr = ice_netdev_to_repr(netdev);
/* for port representors only ETH_SS_STATS is supported */
- if (ice_check_vf_ready_for_cfg(repr->vf) ||
- stringset != ETH_SS_STATS)
+ if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
return;
__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4438,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 737c00b02dd0..570425772b63 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
@@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
+ break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -423,7 +433,7 @@ err_out:
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
*/
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -559,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -595,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -889,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
+ case ICE_VSI_SF:
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
+ break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1136,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1214,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2095,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2264,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2648,7 +2668,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2676,7 +2697,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2747,6 +2769,26 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
}
/**
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
+ *
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
+ */
+void ice_napi_add(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 36d86535695d..1a6cfc8693ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -45,6 +45,7 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
@@ -59,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c7db88b517da..59c0e88153c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -15,6 +15,7 @@
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
+#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -2974,6 +2975,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -3089,14 +3093,14 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
* @dev: netdevice
* @xdp: XDP command
*/
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
int ret;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
@@ -3556,26 +3560,6 @@ skip_req_irq:
}
/**
- * ice_napi_add - register NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be registered
- *
- * This function is only called in the driver's load path. Registering the NAPI
- * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
- * reset/rebuild, etc.)
- */
-static void ice_napi_add(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
-}
-
-/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @vsi: the VSI associated with the new netdev
*/
@@ -3608,7 +3592,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
-static void ice_set_netdev_features(struct net_device *netdev)
+void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
@@ -3790,8 +3774,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding VLAN IDs
*/
-static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3853,8 +3836,7 @@ finish:
*
* net_device_ops implementation for removing VLAN IDs
*/
-static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -4023,6 +4005,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+
+ xa_destroy(&pf->dyn_ports);
+ xa_destroy(&pf->sf_nums);
}
/**
@@ -4116,6 +4101,9 @@ static int ice_init_pf(struct ice_pf *pf)
hash_init(pf->vfs.table);
ice_mbx_init_snapshot(&pf->hw);
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+
return 0;
}
@@ -5458,6 +5446,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
devl_lock(priv_to_devlink(pf));
+ ice_dealloc_all_dynamic_ports(pf);
ice_deinit_devlink(pf);
ice_unload(pf);
@@ -5946,8 +5935,16 @@ static int __init ice_module_init(void)
goto err_dest_lag_wq;
}
+ status = ice_sf_driver_register();
+ if (status) {
+ pr_err("Failed to register SF driver, err %d\n", status);
+ goto err_sf_driver;
+ }
+
return 0;
+err_sf_driver:
+ pci_unregister_driver(&ice_driver);
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
ice_debugfs_exit();
@@ -5965,6 +5962,7 @@ module_init(ice_module_init);
*/
static void __exit ice_module_exit(void)
{
+ ice_sf_driver_unregister();
pci_unregister_driver(&ice_driver);
ice_debugfs_exit();
destroy_workqueue(ice_wq);
@@ -6766,7 +6764,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -7124,7 +7123,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
-static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -7465,7 +7463,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
- if (vsi->type == ICE_VSI_PF) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
@@ -7801,7 +7799,7 @@ clear_recovery:
*
* Returns 0 on success, negative on failure
*/
-static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -8225,7 +8223,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
-static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *tx_ring = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index bdda3401e343..00d4a9125dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -59,12 +59,13 @@ static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
- if (ice_is_vf_disabled(np->repr->vf))
+ if (repr->ops.ready(repr))
return;
- vsi = np->repr->src_vsi;
+ vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
@@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
}
/**
- * ice_repr_open - Enable port representor's network interface
+ * ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
@@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_open(struct net_device *netdev)
+static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_open(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
/**
- * ice_repr_stop - Disable port representor's network interface
+ * ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
@@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_stop(struct net_device *netdev)
+static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_stop(struct net_device *netdev)
+{
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
@@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
-static const struct net_device_ops ice_repr_netdev_ops = {
+static const struct net_device_ops ice_repr_vf_netdev_ops = {
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_vf_open,
+ .ndo_stop = ice_repr_vf_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
+};
+
+static const struct net_device_ops ice_repr_sf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
- .ndo_open = ice_repr_open,
- .ndo_stop = ice_repr_stop,
+ .ndo_open = ice_repr_sf_open,
+ .ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
@@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = {
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
- return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+ return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
+ netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
+ * @ops: new ops for netdev
*/
static int
-ice_repr_reg_netdev(struct net_device *netdev)
+ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
- netdev->netdev_ops = &ice_repr_netdev_ops;
+ netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
@@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
-static void ice_repr_remove_node(struct devlink_port *devlink_port)
+static int ice_repr_ready_vf(struct ice_repr *repr)
+{
+ return ice_check_vf_ready_for_cfg(repr->vf);
+}
+
+static int ice_repr_ready_sf(struct ice_repr *repr)
{
- devl_rate_leaf_destroy(devlink_port);
+ return !repr->sf->active;
}
/**
- * ice_repr_rem - remove representor from VF
+ * ice_repr_destroy - free port representor memory
* @repr: pointer to representor structure
*/
-static void ice_repr_rem(struct ice_repr *repr)
+void ice_repr_destroy(struct ice_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
-/**
- * ice_repr_rem_vf - remove representor from VF
- * @repr: pointer to representor structure
- */
-void ice_repr_rem_vf(struct ice_repr *repr)
+static void ice_repr_rem_vf(struct ice_repr *repr)
{
- ice_repr_remove_node(&repr->vf->devlink_port);
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
- ice_repr_rem(repr);
}
-static void ice_repr_set_tx_topology(struct ice_pf *pf)
+static void ice_repr_rem_sf(struct ice_repr *repr)
{
- struct devlink *devlink;
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_sf_port(repr->sf);
+}
+
+static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
+{
/* only export if ADQ and DCB disabled and eswitch enabled*/
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
- devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
- * ice_repr_add - add representor for generic VSI
- * @pf: pointer to PF structure
+ * ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
- * @parent_mac: device MAC address
*/
-static struct ice_repr *
-ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
struct ice_netdev_priv *np;
struct ice_repr *repr;
@@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
np = netdev_priv(repr->netdev);
np->repr = repr;
- ether_addr_copy(repr->parent_mac, parent_mac);
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
@@ -371,34 +402,18 @@ err_alloc:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+static int ice_repr_add_vf(struct ice_repr *repr)
{
- struct ice_repr *repr;
- struct ice_vsi *vsi;
+ struct ice_vf *vf = repr->vf;
+ struct devlink *devlink;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return ERR_PTR(-ENOENT);
-
err = ice_devlink_create_vf_port(vf);
if (err)
- return ERR_PTR(err);
-
- repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
- goto err_repr_add;
- }
-
- repr->vf = vf;
-
- repr->netdev->min_mtu = ETH_MIN_MTU;
- repr->netdev->max_mtu = ICE_MAX_MTU;
+ return err;
- SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
- err = ice_repr_reg_netdev(repr->netdev);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
if (err)
goto err_netdev;
@@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
goto err_cfg_vsi;
ice_virtchnl_set_repr_ops(vf);
- ice_repr_set_tx_topology(vf->pf);
- return repr;
+ devlink = priv_to_devlink(vf->pf);
+ ice_repr_set_tx_topology(vf->pf, devlink);
+
+ return 0;
err_cfg_vsi:
unregister_netdev(repr->netdev);
err_netdev:
- ice_repr_rem(repr);
-err_repr_add:
ice_devlink_destroy_vf_port(vf);
- return ERR_PTR(err);
+ return err;
+}
+
+/**
+ * ice_repr_create_vf - add representor for VF VSI
+ * @vf: VF to create port representor on
+ *
+ * Set the correct representor type for the VF and set up the function
+ * pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_repr *repr;
+
+ if (!vsi)
+ return ERR_PTR(-EINVAL);
+
+ repr = ice_repr_create(vsi);
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_VF;
+ repr->vf = vf;
+ repr->ops.add = ice_repr_add_vf;
+ repr->ops.rem = ice_repr_rem_vf;
+ repr->ops.ready = ice_repr_ready_vf;
+
+ ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
+
+ return repr;
+}
+
+static int ice_repr_add_sf(struct ice_repr *repr)
+{
+ struct ice_dynamic_port *sf = repr->sf;
+ int err;
+
+ err = ice_devlink_create_sf_port(sf);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
+ if (err)
+ goto err_netdev;
+
+ ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_sf_port(sf);
+ return err;
+}
+
+/**
+ * ice_repr_create_sf - add representor for SF VSI
+ * @sf: SF to create port representor on
+ *
+ * Set the correct representor type for the SF and set up the function
+ * pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create(sf->vsi);
+
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_SF;
+ repr->sf = sf;
+ repr->ops.add = ice_repr_add_sf;
+ repr->ops.rem = ice_repr_rem_sf;
+ repr->ops.ready = ice_repr_ready_sf;
+
+ ether_addr_copy(repr->parent_mac, sf->hw_addr);
+
+ return repr;
}
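Both constructors fill the same small ops vtable, which is what keeps the shared eswitch and ethtool paths flavour-agnostic: they call repr->ops.add/rem/ready without knowing whether a VF or an SF sits behind the representor. ready() follows the 0-on-success convention of ice_check_vf_ready_for_cfg(), so callers treat any non-zero value as "skip". A sketch (not driver code):

    /* a representor is safe to query only when ready() reports success */
    static bool example_repr_usable(struct ice_repr *repr)
    {
            return repr->ops.ready(repr) == 0;
    }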
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 488661b2900b..35bd93165e1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats {
u64 tx_drops;
};
+enum ice_repr_type {
+ ICE_REPR_TYPE_VF,
+ ICE_REPR_TYPE_SF,
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
- struct ice_vf *vf;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
+ enum ice_repr_type type;
+ union {
+ struct ice_vf *vf;
+ struct ice_dynamic_port *sf;
+ };
+ struct {
+ int (*add)(struct ice_repr *repr);
+ void (*rem)(struct ice_repr *repr);
+ int (*ready)(struct ice_repr *repr);
+ } ops;
};
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
-void ice_repr_rem_vf(struct ice_repr *repr);
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
+
+void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
new file mode 100644
index 000000000000..d00389c405c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_txrx.h"
+#include "ice_fltr.h"
+#include "ice_sf_eth.h"
+#include "devlink/devlink_port.h"
+#include "devlink/devlink.h"
+
+static const struct net_device_ops ice_sf_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
+};
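Unlike the representor netdev ops in ice_repr.c, the SF netdev reuses the PF datapath entry points (ice_open(), ice_start_xmit(), the XDP and XSK hooks); this is why the ice.h and ice_main.c hunks earlier in this patch un-static ice_change_mtu(), ice_tx_timeout(), ice_get_stats64() and friends instead of duplicating them.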
+
+/**
+ * ice_sf_cfg_netdev - Allocate, configure and register a netdev
+ * @dyn_port: subfunction associated with configured netdev
+ * @devlink_port: subfunction devlink port to be linked with netdev
+ *
+ * Return: 0 on success, negative value on failure
+ */
+static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
+ struct devlink_port *devlink_port)
+{
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
+
+ eth_hw_addr_set(netdev, dyn_port->hw_addr);
+ ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
+ netdev->netdev_ops = &ice_sf_netdev_ops;
+ SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ vsi->netdev = NULL;
+ return err;
+ }
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static void ice_sf_decfg_netdev(struct ice_vsi *vsi)
+{
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+}
+
+/**
+ * ice_sf_dev_probe - subfunction driver probe function
+ * @adev: pointer to the auxiliary device
+ * @id: pointer to the auxiliary_device id
+ *
+ * Configure VSI and netdev resources for the subfunction device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_pf *pf = dyn_port->pf;
+ struct device *dev = &adev->dev;
+ struct ice_sf_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ vsi->type = ICE_VSI_SF;
+ vsi->port_info = pf->hw.port_info;
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ priv = ice_allocate_sf(&adev->dev, pf);
+ if (IS_ERR(priv)) {
+ dev_err(dev, "Subfunction devlink alloc failed");
+ return PTR_ERR(priv);
+ }
+
+ priv->dev = sf_dev;
+ sf_dev->priv = priv;
+ devlink = priv_to_devlink(priv);
+
+ devl_lock(devlink);
+
+ err = ice_vsi_cfg(vsi);
+ if (err) {
+ dev_err(dev, "Subfunction vsi config failed");
+ goto err_free_devlink;
+ }
+ vsi->sf = dyn_port;
+
+ ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+ err = ice_devlink_create_sf_dev_port(sf_dev);
+ if (err) {
+ dev_err(dev, "Cannot add ice virtual devlink port for subfunction");
+ goto err_vsi_decfg;
+ }
+
+ err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+ if (err) {
+ dev_err(dev, "Subfunction netdev config failed");
+ goto err_devlink_destroy;
+ }
+
+ err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+ if (err) {
+ dev_err(dev, "Can't link devlink instance to SF devlink port");
+ goto err_netdev_decfg;
+ }
+
+ ice_napi_add(vsi);
+
+ devl_register(devlink);
+ devl_unlock(devlink);
+
+ dyn_port->attached = true;
+
+ return 0;
+
+err_netdev_decfg:
+ ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+ ice_vsi_decfg(vsi);
+err_free_devlink:
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->priv);
+ devl_lock(devlink);
+
+ ice_vsi_close(vsi);
+
+ ice_sf_decfg_netdev(vsi);
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ ice_vsi_decfg(vsi);
+
+ dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+ { .name = "ice.sf", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+ .name = "sf",
+ .probe = ice_sf_dev_probe,
+ .remove = ice_sf_dev_remove,
+ .id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register the auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ */
+void ice_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Most of the subfunction deactivation is handled in the remove
+ * handler; here we only free the tracking resources.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(device);
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+ xa_erase(&ice_sf_aux_id, adev->id);
+ kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Setup the netdev
+ * resources associated and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = dyn_port->pf;
+ struct ice_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ u32 id;
+
+ err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+ return err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ err = -ENOMEM;
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+ goto xa_erase;
+ }
+ pdev = pf->pdev;
+
+ sf_dev->dyn_port = dyn_port;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = "sf";
+ sf_dev->adev.dev.release = ice_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+ goto sf_dev_free;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+ goto aux_dev_uninit;
+ }
+
+ dyn_port->sf_dev = sf_dev;
+
+ return 0;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+ kfree(sf_dev);
+xa_erase:
+ xa_erase(&ice_sf_aux_id, id);
+
+ return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+ struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
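+
+/* Illustrative usage sketch (not part of the driver): activating a dynamic
+ * port publishes an "ice.sf" auxiliary device, which binds to ice_sf_driver
+ * and runs ice_sf_dev_probe(). Deactivation deletes the device; the release
+ * callback then frees sf_dev and its xarray ID.
+ *
+ *	err = ice_sf_eth_activate(dyn_port, extack);
+ *	if (err)
+ *		return err;
+ *	...
+ *	ice_sf_eth_deactivate(dyn_port);
+ */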
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _ICE_SF_ETH_H_
+#define _ICE_SF_ETH_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ice.h"
+
+struct ice_sf_dev {
+ struct auxiliary_device adev;
+ struct ice_dynamic_port *dyn_port;
+ struct ice_sf_priv *priv;
+};
+
+struct ice_sf_priv {
+ struct ice_sf_dev *dev;
+ struct devlink_port devlink_port;
+};
+
+static inline struct ice_sf_dev *
+ice_adev_to_sf_dev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct ice_sf_dev, adev);
+}
+
+int ice_sf_driver_register(void);
+void ice_sf_driver_unregister(void);
+
+int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack);
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port);
+#endif /* _ICE_SF_ETH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..3d7e96721cf9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_sf_vsi_vlan_ops.h"
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ vlan_ops = &vsi->outer_vlan_ops;
+ else
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..8c44eafceea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _ICE_SF_VSI_VLAN_OPS_H_
+#define _ICE_SF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 55ef33208456..e34fe2516ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
- retval = ice_eswitch_attach(pf, vf);
+ retval = ice_eswitch_attach_vf(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c9bc3f1add5d..8208055d6e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2368,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
+ if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index b9e443232335..45768796691f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -159,6 +159,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5635e9da2212..a69e91f88d81 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -766,7 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -782,7 +782,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
- ice_eswitch_attach(pf, vf);
+ ice_eswitch_attach_vf(pf, vf);
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 7aae7fdcfcdb..8c7a9b41fb63 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -3,6 +3,7 @@
#include "ice_pf_vsi_vlan_ops.h"
#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sf_vsi_vlan_ops.h"
#include "ice_lib.h"
#include "ice.h"
@@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
case ICE_VSI_VF:
ice_vf_vsi_init_vlan_ops(vsi);
break;
+ case ICE_VSI_SF:
+ ice_sf_vsi_init_vlan_ops(vsi);
+ break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
ice_vsi_type_str(vsi->type));
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5dee829bfc47..334ae945d640 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -289,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 685335832a93..ea6070180c96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -172,6 +172,16 @@ config MLX5_SW_STEERING
help
Build support for software-managed steering in the NIC.
+config MLX5_HW_STEERING
+ bool "Mellanox Technologies hardware-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for Hardware-Managed Flow Steering (HMFS) in the NIC.
+ HMFS is a new approach to managing steering rules where STEs are
+ written to ICM by HW (as opposed to SW in software-managed steering),
+ which allows a higher rate of rule insertion.
+
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1289475e7be7..5912f7e614f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_action.o steering/fs_dr.o \
steering/dr_definer.o steering/dr_ptrn.o \
steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+
+#
+# HW Steering
+#
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
+ steering/hws/mlx5hws_context.o \
+ steering/hws/mlx5hws_pat_arg.o \
+ steering/hws/mlx5hws_buddy.o \
+ steering/hws/mlx5hws_pool.o \
+ steering/hws/mlx5hws_table.o \
+ steering/hws/mlx5hws_action.o \
+ steering/hws/mlx5hws_rule.o \
+ steering/hws/mlx5hws_matcher.o \
+ steering/hws/mlx5hws_send.o \
+ steering/hws/mlx5hws_definer.o \
+ steering/hws/mlx5hws_bwc.o \
+ steering/hws/mlx5hws_debug.o \
+ steering/hws/mlx5hws_vport.o \
+ steering/hws/mlx5hws_bwc_complex.o
+
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 78eb6b7097e1..6201647d6156 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -110,7 +110,9 @@ enum fs_flow_table_type {
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_PORT_SEL = 0X9,
- FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_MAX_TYPE = FS_FT_FDB_TX,
};
enum fs_flow_table_op_mod {
@@ -368,7 +370,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 8c2a34a0d6be..baefb9a3fa05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_1);
+ flow_table_context.sws.sw_owner_icm_root_1);
output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_0);
+ flow_table_context.sws.sw_owner_icm_root_0);
return 0;
}
@@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
*/
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
new file mode 100644
index 000000000000..9a85d4e12f77
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -0,0 +1,954 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_H_
+#define MLX5HWS_H_
+
+struct mlx5hws_context;
+struct mlx5hws_table;
+struct mlx5hws_matcher;
+struct mlx5hws_rule;
+
+enum mlx5hws_table_type {
+ MLX5HWS_TABLE_TYPE_FDB,
+ MLX5HWS_TABLE_TYPE_MAX,
+};
+
+enum mlx5hws_matcher_resource_mode {
+ /* Allocate resources based on number of rules with minimal failure probability */
+ MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ /* Allocate fixed size hash table based on given column and rows */
+ MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE,
+};
+
+enum mlx5hws_action_type {
+ MLX5HWS_ACTION_TYP_LAST,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ MLX5HWS_ACTION_TYP_DROP,
+ MLX5HWS_ACTION_TYP_MISS,
+ MLX5HWS_ACTION_TYP_TBL,
+ MLX5HWS_ACTION_TYP_CTR,
+ MLX5HWS_ACTION_TYP_TAG,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ MLX5HWS_ACTION_TYP_VPORT,
+ MLX5HWS_ACTION_TYP_POP_VLAN,
+ MLX5HWS_ACTION_TYP_PUSH_VLAN,
+ MLX5HWS_ACTION_TYP_ASO_METER,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ MLX5HWS_ACTION_TYP_REMOVE_HEADER,
+ MLX5HWS_ACTION_TYP_RANGE,
+ MLX5HWS_ACTION_TYP_SAMPLER,
+ MLX5HWS_ACTION_TYP_DEST_ARRAY,
+ MLX5HWS_ACTION_TYP_MAX,
+};
+
+enum mlx5hws_action_flags {
+ MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0,
+ /* A shared action can be used by multiple threads, since the
+ * data is written only once, at the creation of the action.
+ */
+ MLX5HWS_ACTION_FLAG_SHARED = 1 << 1,
+};
+
+enum mlx5hws_action_aso_meter_color {
+ MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0,
+ MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1,
+ MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2,
+ MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum mlx5hws_send_queue_actions {
+ /* Start executing all pending queued rules */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+ /* Start executing all pending queued rules and wait till completion */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
+};
+
+struct mlx5hws_context_attr {
+ u16 queues;
+ u16 queue_size;
+ bool bwc; /* Enable support for the backward-compatible API */
+};
+
+struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
+};
+
+enum mlx5hws_matcher_flow_src {
+ MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0,
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1,
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2,
+};
+
+enum mlx5hws_matcher_insert_mode {
+ MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1,
+};
+
+enum mlx5hws_matcher_distribute_mode {
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
+};
+
+struct mlx5hws_matcher_attr {
+ /* Processing priority inside table */
+ u32 priority;
+ /* Provide all rules with unique rule_idx in num_log range to reduce locking */
+ bool optimize_using_rule_idx;
+ /* Resource mode and corresponding size */
+ enum mlx5hws_matcher_resource_mode mode;
+ /* Optimize insertion in case packet origin is the same for all rules */
+ enum mlx5hws_matcher_flow_src optimize_flow_src;
+ /* Define the insertion and distribution modes for this matcher */
+ enum mlx5hws_matcher_insert_mode insert_mode;
+ enum mlx5hws_matcher_distribute_mode distribute_mode;
+ /* Define whether the created matcher supports resizing into a bigger matcher */
+ bool resizable;
+ union {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+ };
+ /* Optional AT attach configuration - Max number of additional AT */
+ u8 max_num_of_at_attach;
+};
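+
+/* Illustrative configuration (values are arbitrary, not a recommendation):
+ * a rule-based matcher sized for roughly 2^16 rules:
+ *
+ *	struct mlx5hws_matcher_attr attr = {
+ *		.priority = 0,
+ *		.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ *		.rule.num_log = 16,
+ *	};
+ */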
+
+struct mlx5hws_rule_attr {
+ void *user_data;
+ /* Valid if matcher optimize_using_rule_idx is set or
+ * if matcher is configured to insert rules by index.
+ */
+ u32 rule_idx;
+ u32 flow_source;
+ u16 queue_id;
+ u32 burst:1;
+};
+
+/* In actions that take an offset, the offset points to a single, unique
+ * resource. The user should not reuse the same index, because data updates
+ * are not atomic.
+ */
+struct mlx5hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct {
+ u32 value;
+ } tag;
+
+ struct {
+ u32 offset;
+ } counter;
+
+ struct {
+ u32 offset;
+ u8 *data;
+ } modify_header;
+
+ struct {
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ } reformat;
+
+ struct {
+ __be32 vlan_hdr;
+ } push_vlan;
+
+ struct {
+ u32 offset;
+ enum mlx5hws_action_aso_meter_color init_color;
+ } aso_meter;
+ };
+};
+
+struct mlx5hws_action_reformat_header {
+ size_t sz;
+ void *data;
+};
+
+struct mlx5hws_action_insert_header {
+ struct mlx5hws_action_reformat_header hdr;
+ /* PRM start anchor to which header will be inserted */
+ u8 anchor;
+ /* Header insertion offset in bytes, from the start
+ * anchor to the location where new header will be inserted.
+ */
+ u8 offset;
+ /* Indicates that this header insertion adds an encapsulation header to
+ * the packet, requiring the device to update offloaded fields (for
+ * example, IPv4 total length).
+ */
+ bool encap;
+};
+
+struct mlx5hws_action_remove_header_attr {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where the header removal starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+};
+
+struct mlx5hws_action_mh_pattern {
+ /* Byte size of modify actions provided by "data" */
+ size_t sz;
+ /* PRM format modify actions pattern */
+ __be64 *data;
+};
+
+struct mlx5hws_action_dest_attr {
+ /* Required destination action to forward the packet */
+ struct mlx5hws_action *dest;
+ /* Optional reformat action */
+ struct mlx5hws_action *reformat;
+};
+
+/* Check whether HWS is supported
+ *
+ * @param[in] mdev
+ * The device to check.
+ * @return true if supported, false otherwise.
+ */
+static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev)
+{
+ u8 ignore_flow_level_rtc_valid;
+ u8 wqe_based_flow_table_update;
+
+ wqe_based_flow_table_update =
+ MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap);
+ ignore_flow_level_rtc_valid =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ return wqe_based_flow_table_update && ignore_flow_level_rtc_valid;
+}
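+
+/* Illustrative usage: callers would typically gate context creation on
+ * this capability check, e.g.:
+ *
+ *	if (!mlx5hws_is_supported(mdev))
+ *		return -EOPNOTSUPP;
+ *	ctx = mlx5hws_context_open(mdev, &attr);
+ */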
+
+/* Open a context used for direct rule insertion using hardware steering.
+ * Each context can contain multiple tables of different types.
+ *
+ * @param[in] mdev
+ * The device to be used for HWS.
+ * @param[in] attr
+ * Attributes used for context open.
+ * @return pointer to mlx5hws_context on success NULL otherwise.
+ */
+struct mlx5hws_context *
+mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr);
+
+/* Close a context used for direct hardware steering.
+ *
+ * @param[in] ctx
+ * mlx5hws context to close.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx);
+
+/* Set a peer context, each context can have multiple contexts as peers.
+ *
+ * @param[in] ctx
+ * The context in which the peer_ctx will be peered to it.
+ * @param[in] peer_ctx
+ * The peer context.
+ * @param[in] peer_vhca_id
+ * The peer context vhca id.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id);
+
+/* Create a new direct rule table. Each table can contain multiple matchers.
+ *
+ * @param[in] ctx
+ * The context in which the new table will be opened.
+ * @param[in] attr
+ * Attributes used for table creation.
+ * @return pointer to mlx5hws_table on success NULL otherwise.
+ */
+struct mlx5hws_table *
+mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr);
+
+/* Destroy direct rule table.
+ *
+ * @param[in] tbl
+ * Table to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl);
+
+/* Get ID of the flow table.
+ *
+ * @param[in] tbl
+ * Table to get ID of.
+ * @return ID of the table.
+ */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl);
+
+/* Set a default miss table for an mlx5hws_table using another mlx5hws_table.
+ * Traffic that misses all of the table's matchers will be forwarded to the
+ * miss table.
+ *
+ * @param[in] tbl
+ * Source table
+ * @param[in] miss_tbl
+ * Target (miss) table, or NULL to remove current miss table
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl);
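+
+/* Illustrative example: chain tbl to miss_tbl, then later restore the
+ * default miss behavior by passing NULL:
+ *
+ *	err = mlx5hws_table_set_default_miss(tbl, miss_tbl);
+ *	...
+ *	err = mlx5hws_table_set_default_miss(tbl, NULL);
+ */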
+
+/* Create new match template based on items mask, the match template
+ * will be used for matcher creation.
+ *
+ * @param[in] ctx
+ * The context in which the new template will be created.
+ * @param[in] match_param
+ * Describe the mask based on PRM match parameters
+ * @param[in] match_param_sz
+ * Size of match param buffer
+ * @param[in] match_criteria_enable
+ * Bitmap for each sub-set in match_criteria buffer
+ * @return pointer to mlx5hws_match_template on success NULL otherwise
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable);
+
+/* Destroy match template.
+ *
+ * @param[in] mt
+ * Match template to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/* Create new action template based on action_type array, the action template
+ * will be used for matcher creation.
+ *
+ * @param[in] action_type
+ * An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ * @return pointer to mlx5hws_action_template on success NULL otherwise
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
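+
+/* Illustrative example: a template for rules that count and then forward
+ * to a table (the array must end with MLX5HWS_ACTION_TYP_LAST):
+ *
+ *	enum mlx5hws_action_type at[] = {
+ *		MLX5HWS_ACTION_TYP_CTR,
+ *		MLX5HWS_ACTION_TYP_TBL,
+ *		MLX5HWS_ACTION_TYP_LAST,
+ *	};
+ *	at_handle = mlx5hws_action_template_create(at);
+ */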
+
+/* Destroy action template.
+ *
+ * @param[in] at
+ * Action template to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/* Create a new direct rule matcher. Each matcher can contain multiple rules.
+ * Matchers on the table will be processed by priority. Matching fields and
+ * mask are described by the match template. In some cases multiple match
+ * templates can be used on the same matcher.
+ *
+ * @param[in] table
+ * The table in which the new matcher will be opened.
+ * @param[in] mt
+ * Array of match templates to be used on matcher.
+ * @param[in] num_of_mt
+ * Number of match templates in mt array.
+ * @param[in] at
+ * Array of action templates to be used on matcher.
+ * @param[in] num_of_at
+ * Number of action templates in at array.
+ * @param[in] attr
+ * Attributes used for matcher creation.
+ * @return pointer to mlx5hws_matcher on success NULL otherwise.
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr);
+
+/* Destroy direct rule matcher.
+ *
+ * @param[in] matcher
+ * Matcher to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
+
+/* Attach new action template to direct rule matcher.
+ *
+ * @param[in] matcher
+ * Matcher to attach the action template to.
+ * @param[in] at
+ * Action template to be attached to the matcher.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at);
+
+/* Link two matchers and enable moving rules from src matcher to dst matcher.
+ * Both matchers must be in the same table type, must be created with 'resizable'
+ * property, and should have the same characteristics (e.g. same mt, same at).
+ *
+ * It is the user's responsibility to make sure that the dst matcher
+ * was allocated with the appropriate size.
+ *
+ * Once the function is completed, the user is:
+ * - allowed to move rules from src into dst matcher
+ * - no longer allowed to insert rules to the src matcher
+ *
+ * The user is always allowed to insert rules to the dst matcher and
+ * to delete rules from any matcher.
+ *
+ * @param[in] src_matcher
+ * source matcher for moving rules from
+ * @param[in] dst_matcher
+ * destination matcher for moving rules to
+ * @return zero on successful move, non zero otherwise.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher);
+
+/* Enqueue a rule move operation: move a rule from the src to the dst matcher.
+ *
+ * @param[in] src_matcher
+ * matcher that the rule belongs to
+ * @param[in] rule
+ * the rule to move
+ * @param[in] attr
+ * rule attributes
+ * @return zero on success, non zero otherwise.
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/* Enqueue create rule operation.
+ *
+ * @param[in] matcher
+ * The matcher in which the new rule will be created.
+ * @param[in] mt_idx
+ * Match template index to create the match with.
+ * @param[in] match_param
+ * The match parameter PRM buffer used for the value matching.
+ * @param[in] at_idx
+ * Action template index to apply the actions with.
+ * @param[in] rule_actions
+ * Rule actions to be executed on match.
+ * @param[in] attr
+ * Rule creation attributes.
+ * @param[in, out] rule_handle
+ * A valid rule handle. The handle doesn't require any initialization.
+ * @return zero on successful enqueue non zero otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle);
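+
+/* Illustrative flow (assumptions: a single queue, error handling elided).
+ * Creation is asynchronous - the call only enqueues the operation, and the
+ * completion must be reaped from the queue given in the rule attributes:
+ *
+ *	struct mlx5hws_flow_op_result res;
+ *	struct mlx5hws_rule rule;
+ *
+ *	mlx5hws_rule_create(matcher, 0, match_param, 0, rule_actions,
+ *			    &attr, &rule);
+ *	while (mlx5hws_send_queue_poll(ctx, attr.queue_id, &res, 1) == 0)
+ *		;
+ *
+ * and then check that res.status == MLX5HWS_FLOW_OP_SUCCESS.
+ */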
+
+/* Enqueue destroy rule operation.
+ *
+ * @param[in] rule
+ * The rule destruction to enqueue.
+ * @param[in] attr
+ * Rule destruction attributes.
+ * @return zero on successful enqueue non zero otherwise.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/* Enqueue update actions on an existing rule.
+ *
+ * @param[in, out] rule_handle
+ * A valid rule handle to update.
+ * @param[in] at_idx
+ * Action template index to update the actions with.
+ * @param[in] rule_actions
+ * Rule action to be executed on match.
+ * @param[in] attr
+ * Rule update attributes.
+ * @return zero on successful enqueue non zero otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr);
+
+/* Get action type.
+ *
+ * @param[in] action
+ * The action to get the type of.
+ * @return action type.
+ */
+enum mlx5hws_action_type
+mlx5hws_action_get_type(struct mlx5hws_action *action);
+
+/* Create direct rule drop action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/* Create direct rule default miss action.
+ * Defaults are RX: Drop TX: Wire.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/* Create direct rule goto table action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] tbl
+ * Destination table.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags);
+
+/* Create direct rule goto table number action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] table_num
+ * Destination table number.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_num, u32 flags);
+
+/* Create direct rule range match action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] field
+ * Field to compare the value against.
+ * @param[in] hit_ft
+ * Flow table to go to on hit.
+ * @param[in] miss_ft
+ * Flow table to go to on miss.
+ * @param[in] min
+ * Minimal value of the field to be considered as hit.
+ * @param[in] max
+ * Maximal value of the field to be considered as hit.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags);
+
+/* Create direct rule flow sampler action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] sampler_id
+ * Flow sampler object ID.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags);
+
+/* Create direct rule goto vport action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] vport_num
+ * Destination vport number.
+ * @param[in] vhca_id_valid
+ * Tells if the vhca_id parameter is valid.
+ * @param[in] vhca_id
+ * VHCA ID of the destination vport.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags);
+
+/* Create direct rule TAG action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/* Create direct rule counter action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] obj_id
+ * Direct rule counter object ID.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags);
+
+/* Create direct rule reformat action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] reformat_type
+ * Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT.
+ * @param[in] num_of_hdrs
+ * Number of provided headers in "hdrs" array.
+ * @param[in] hdrs
+ * Headers array containing header information.
+ * @param[in] log_bulk_size
+ * Number of unique values used with this reformat.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/* Create direct rule modify header action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_of_patterns
+ * Number of provided patterns in "patterns" array.
+ * @param[in] patterns
+ * Patterns array containing pattern information.
+ * @param[in] log_bulk_size
+ * Number of unique values used with this pattern.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags);
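+
+/* Illustrative example (hypothetical pattern_data: a __be64 array in PRM
+ * format, pattern_bytes its size): a single-pattern action with room for
+ * 2^8 distinct argument values:
+ *
+ *	struct mlx5hws_action_mh_pattern pat = {
+ *		.data = pattern_data,
+ *		.sz = pattern_bytes,
+ *	};
+ *	action = mlx5hws_action_create_modify_header(ctx, 1, &pat, 8, flags);
+ */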
+
+/* Create direct rule ASO flow meter action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] obj_id
+ * ASO object ID.
+ * @param[in] return_reg_c
+ * Copy the ASO object value into this reg_c, after a packet hits a rule with this ASO object.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_c,
+ u32 flags);
+
+/* Create direct rule pop vlan action.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/* Create direct rule push vlan action.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] ignore_flow_level
+ * Boolean that says whether to turn on 'ignore_flow_level' for this dest.
+ * @param[in] flow_source
+ * Source port of the traffic for this action.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags);
+
+/* Create insert header action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_of_hdrs
+ * Number of provided headers in "hdrs" array.
+ * @param[in] hdrs
+ * Headers array containing header information.
+ * @param[in] log_bulk_size
+ * Number of unique values used with this insert header.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/* Create remove header action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] attr
+ * Attributes specifying the remove header type: either the PRM start and
+ * end anchors, or the PRM start anchor and the remove size in bytes.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags);
+
+/* Create direct rule LAST action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
+
+/* Destroy direct rule action.
+ *
+ * @param[in] action
+ * The action to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action);
+
+enum mlx5hws_flow_op_status {
+ MLX5HWS_FLOW_OP_SUCCESS,
+ MLX5HWS_FLOW_OP_ERROR,
+};
+
+struct mlx5hws_flow_op_result {
+ enum mlx5hws_flow_op_status status;
+ void *user_data;
+};
+
+/* Poll queue for rule creation and deletions completions.
+ *
+ * @param[in] ctx
+ * The context to which the queue belongs.
+ * @param[in] queue_id
+ * The id of the queue to poll.
+ * @param[in, out] res
+ * Completion array.
+ * @param[in] res_nb
+ * Maximum number of results to return.
+ * @return negative number on failure, the number of completions otherwise.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb);
+
+/* Perform an action on the queue
+ *
+ * @param[in] ctx
+ * The context to which the queue belongs.
+ * @param[in] queue_id
+ * The id of the queue to perform the action on.
+ * @param[in] actions
+ * Actions to perform on the queue. (enum mlx5hws_send_queue_actions)
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
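+
+/* Illustrative example: force all rules queued with the burst flag set to
+ * start executing, without waiting for completions:
+ *
+ *	mlx5hws_send_queue_action(ctx, queue_id,
+ *				  MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC);
+ */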
+
+/* Dump HWS info
+ *
+ * @param[in] ctx
+ * The context which to dump the info from.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_debug_dump(struct mlx5hws_context *ctx);
+
+struct mlx5hws_bwc_matcher;
+struct mlx5hws_bwc_rule;
+
+struct mlx5hws_match_parameters {
+ size_t match_sz;
+ u32 *match_buf; /* Device spec format */
+};
+
+/* Create a new BWC direct rule matcher.
+ * This function does the following:
+ * - creates match template based on flow items
+ * - creates an empty action template
+ * - creates a usual mlx5hws_matcher with these mt and at, setting
+ * its size to minimal
+ * Notes:
+ * - table->ctx must have BWC support
+ * - complex rules are not supported
+ *
+ * @param[in] table
+ * The table in which the new matcher will be opened
+ * @param[in] priority
+ * Priority for this BWC matcher
+ * @param[in] match_criteria_enable
+ * Bitmask that defines matching criteria
+ * @param[in] mask
+ * Match parameters
+ * @return pointer to mlx5hws_bwc_matcher on success or NULL otherwise.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+/* Destroy BWC direct rule matcher.
+ *
+ * @param[in] bwc_matcher
+ * Matcher to destroy
+ * @return zero on success, non zero otherwise
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+/* Create a new BWC rule.
+ * Unlike the usual rule creation function, this one is blocking: when the
+ * function returns, the rule is written to its place (no need to poll).
+ * This function does the following:
+ * - finds matching action template based on the provided rule_actions, or
+ * creates new action template if matching action template doesn't exist
+ * - updates corresponding BWC matcher stats
+ * - if needed, the function performs rehash:
+ * - creates a new matcher based on mt, at, new_sz
+ * - moves all the existing matcher rules to the new matcher
+ * - removes the old matcher
+ * - inserts new rule
+ * - polls till completion is received
+ * Notes:
+ * - matcher->tbl->ctx must have BWC support
+ * - separate BWC ctx queues are used
+ *
+ * @param[in] bwc_matcher
+ * The BWC matcher in which the new rule will be created.
+ * @param[in] params
+ * Match parameters
+ * @param[in] flow_source
+ * Flow source for this rule
+ * @param[in] rule_actions
+ * Rule action to be executed on match
+ * @return valid BWC rule handle on success, NULL otherwise
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[]);
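+
+/* Illustrative contrast with mlx5hws_rule_create(): the BWC variant blocks,
+ * so no completion polling is needed:
+ *
+ *	bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params,
+ *					   flow_source, rule_actions);
+ *	if (!bwc_rule)
+ *		return -EINVAL;
+ *
+ * On success the rule is already written to its final place.
+ */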
+
+/* Destroy BWC direct rule.
+ *
+ * @param[in] bwc_rule
+ * Rule to destroy
+ * @return zero on success, non zero otherwise
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
+
+/* Update actions on an existing BWC rule.
+ *
+ * @param[in] bwc_rule
+ * Rule to update
+ * @param[in] rule_actions
+ * Rule action to update with
+ * @return zero on successful update, non zero otherwise.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[]);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
new file mode 100644
index 000000000000..b27bb4106532
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
@@ -0,0 +1,2604 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
+ [MLX5HWS_TABLE_TYPE_FDB] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
+ },
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+ [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+ [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+ [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+ [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+ [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+ [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+ [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+ [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+ [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+ [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+ [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+ [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+ [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+ [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+ [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+ "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+ return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+ return action->type;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+ enum mlx5hws_context_shared_stc_type stc_type,
+ u8 tbl_type)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_shared_stc *shared_stc;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+ }
+
+ shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+ if (!shared_stc) {
+ ret = -ENOMEM;
+ goto unlock_and_out;
+ }
+ switch (stc_type) {
+ case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.remove_header.decap = 0;
+ stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+ break;
+ case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ default:
+ mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+ pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
+ ret = -EINVAL;
+ goto unlock_and_out;
+ }
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &shared_stc->stc_chunk);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
+ goto free_shared_stc;
+ }
+
+ ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+free_shared_stc:
+ kfree(shared_stc);
+unlock_and_out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+ pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+ return -EINVAL;
+ }
+
+ ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+ stc_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+ struct mlx5hws_action_shared_stc *shared_stc;
+ struct mlx5hws_context *ctx = action->ctx;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ mutex_unlock(&ctx->ctrl_lock);
+ return;
+ }
+
+ shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+ kfree(shared_stc);
+ ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions)
+{
+ mlx5hws_err(ctx, "Invalid action_type sequence");
+ while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+ mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+ user_actions++;
+ }
+ mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type)
+{
+ const u32 *order_arr = action_order_arr[table_type];
+ u8 order_idx = 0;
+ u8 user_idx = 0;
+ bool valid_combo;
+
+ if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+ mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+ return false;
+ }
+
+ while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+ /* User action order validated, move to the next user action */
+ if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+ user_idx++;
+
+ /* Iterate to the next supported action in the order */
+ order_idx++;
+ }
+
+ /* Combination is valid if all user actions were processed */
+ valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+ if (!valid_combo)
+ hws_action_print_combo(ctx, user_actions);
+
+ return valid_combo;
+}
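+
+/* Worked example (illustrative): against the FDB order above, the sequence
+ * { POP_VLAN, MODIFY_HDR, CTR, TBL, LAST } is accepted, since each user
+ * action matches a later slot in action_order_arr, whereas
+ * { CTR, POP_VLAN, LAST } is rejected: both POP_VLAN slots precede the CTR
+ * slot, so the walk can never consume POP_VLAN.
+ */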
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+ enum mlx5hws_table_type table_type,
+ bool is_mirror)
+{
+ bool use_fixup = false;
+ u32 fw_tbl_type;
+ u32 base_id;
+
+ fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ if (is_mirror && stc_attr->ste_table.ignore_tx) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ break;
+ }
+ if (!is_mirror)
+ base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+ else
+ base_id =
+ mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+
+ *fixup_stc_attr = *stc_attr;
+ fixup_stc_attr->ste_table.ste_obj_id = base_id;
+ use_fixup = true;
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ if (fw_tbl_type == FS_FT_FDB_TX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+ fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ ctx->caps->merged_eswitch;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+ break;
+
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.vport_num = 0;
+ fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ stc_attr->vport.eswitch_owner_vhca_id_valid;
+ }
+ use_fixup = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ bool use_fixup;
+ u32 obj_0_id;
+ int ret;
+
+ ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+ return ret;
+ }
+
+ stc_attr->stc_offset = stc->offset;
+
+ /* Dynamic reparse not supported, overwrite and use default */
+ if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+ stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+
+ /* Adjust the stc_attr according to table/action limitations */
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto free_chunk;
+ }
+
+ /* Modify the FDB peer */
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ u32 obj_1_id;
+
+ obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+ &fixup_stc_attr,
+ table_type, true);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to modify peer STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto clean_obj_0;
+ }
+ }
+
+ return 0;
+
+clean_obj_0:
+ cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ cleanup_stc_attr.stc_offset = stc->offset;
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+ return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ u32 obj_id;
+
+ /* Modify the STC not to point to an object */
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.stc_offset = stc->offset;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+ }
+
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+ __be64 pattern)
+{
+ u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+ switch (action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ return MLX5_IFC_STC_ACTION_TYPE_SET;
+ case MLX5_MODIFICATION_TYPE_ADD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ return MLX5_IFC_STC_ACTION_TYPE_COPY;
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+ default:
+ mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+ return MLX5_IFC_STC_ACTION_TYPE_NOP;
+ }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+ u32 obj_id,
+ struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_TAG:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ break;
+ case MLX5HWS_ACTION_TYP_DROP:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_MISS:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_CTR:
+ attr->id = obj_id;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ if (action->modify_header.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+ if (action->modify_header.num_of_actions == 1) {
+ attr->modify_action.data = action->modify_header.single_action;
+ attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+ attr->modify_action.data);
+
+ if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+ attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+ MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+ } else {
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+ attr->modify_header.arg_id = action->modify_header.arg_id;
+ attr->modify_header.pattern_id = action->modify_header.pat_id;
+ }
+ break;
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj_id;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_header.decap = 1;
+ attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ if (!action->reformat.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->insert_header.encap = action->reformat.encap;
+ attr->insert_header.insert_anchor = action->reformat.anchor;
+ attr->insert_header.arg_id = action->reformat.arg_id;
+ attr->insert_header.header_size = action->reformat.header_size;
+ attr->insert_header.insert_offset = action->reformat.offset;
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+ attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+ attr->aso.devx_obj_id = obj_id;
+ attr->aso.return_reg_id = action->aso.return_reg_id;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ attr->vport.vport_num = action->vport.vport_num;
+ attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+ attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+ break;
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->insert_header.encap = 0;
+ attr->insert_header.is_inline = 1;
+ attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+ attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->remove_header.decap = 0; /* decap mode 0 is the only mode we support */
+ attr->remove_words.start_anchor = action->remove_header.anchor;
+ /* the size is already in words */
+ attr->remove_words.num_of_words = action->remove_header.size;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+ }
+}
+
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate STC for FDB */
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto out_err;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+ return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+}
+
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+ return false;
+ }
+
+ if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
+ mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type,
+ u8 bulk_sz)
+{
+ struct mlx5hws_action *action;
+ int i;
+
+ if (!hws_action_is_flag_hws_fdb(flags)) {
+ mlx5hws_err(ctx,
+ "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+ return NULL;
+ }
+
+ if (!hws_action_validate_hws_action(ctx, flags))
+ return NULL;
+
+ action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ for (i = 0; i < bulk_sz; i++) {
+ action[i].ctx = ctx;
+ action[i].flags = flags;
+ action[i].type = action_type;
+ }
+
+ return action;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type)
+{
+ return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, table_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_obj.obj_id = table_id;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags)
+{
+ return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
+}
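+
+/* Usage sketch (illustrative only; "ctx" and "tbl" stand for a
+ * previously created HWS context and table):
+ *
+ *	fwd = mlx5hws_action_create_dest_table(ctx, tbl,
+ *					       MLX5HWS_ACTION_FLAG_HWS_FDB);
+ *	if (!fwd)
+ *		return -EINVAL;
+ *	...
+ *	mlx5hws_action_destroy(fwd);
+ */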
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type action_type,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, action_type);
+ if (!action)
+ return NULL;
+
+ action->aso.obj_id = obj_id;
+ action->aso.return_reg_id = return_reg_id;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
+ obj_id, return_reg_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+ mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+ mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+ goto free_action;
+ }
+
+ action->vport.vport_num = vport_num;
+ action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+ if (vhca_id_valid)
+ action->vport.esw_owner_vhca_id = vhca_id;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+ goto free_shared;
+ }
+
+ return action;
+
+free_shared:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ size_t max_sz = 0;
+ u32 arg_id;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz % W_SIZE != 0) {
+ mlx5hws_err(action->ctx,
+ "Header data size should be in WORD granularity\n");
+ return -EINVAL;
+ }
+ max_sz = max(hdrs[i].sz, max_sz);
+ }
+
+ /* Allocate single shared arg object for all headers */
+ ret = mlx5hws_arg_create(action->ctx,
+ hdrs->data,
+ max_sz,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ action[i].reformat.arg_id = arg_id;
+ action[i].reformat.header_size = hdrs[i].sz;
+ action[i].reformat.num_of_hdrs = num_of_hdrs;
+ action[i].reformat.max_hdr_sz = max_sz;
+ action[i].reformat.require_reparse = true;
+
+ if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+ action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+ action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ action[i].reformat.offset = 0;
+ action[i].reformat.encap = 1;
+ }
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+ goto free_stc;
+ }
+ }
+
+ return 0;
+
+free_stc:
+ while (i--)
+ hws_action_destroy_stcs(&action[i]);
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ int ret;
+
+ /* The action is remove-l2-header + insert-l3-header */
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+ return ret;
+ }
+
+ /* Reuse the insert with pointer for the L2L3 header */
+ ret = hws_action_handle_insert_with_ptr(action,
+ num_of_hdrs,
+ hdrs,
+ log_bulk_sz);
+ if (ret)
+ goto put_shared_stc;
+
+ return 0;
+
+put_shared_stc:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ return ret;
+}
+
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+ u8 *mh_data,
+ int *num_of_actions)
+{
+ int actions;
+ u32 i;
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+ actions = 1;
+
+ /* Add the new header using inline actions, 4 bytes at a time. The
+ * header is inserted in reverse order at the beginning of the packet
+ * to avoid incorrect parsing by the HW. Since the header is 14B or
+ * 18B, two extra padding bytes are added and later removed.
+ */
+ for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+ MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+ actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+ actions++;
+
+ *num_of_actions = actions;
+}
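+
+/* Worked example (illustrative): for a 14B L2 header (no VLAN) the loop
+ * above runs 14 / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1 = 4 times (4B of
+ * inline data per action), i.e. four inline inserts covering 16B, 2B of
+ * which are padding. Together with the leading L2L3 remove and the
+ * trailing one-word (2B) remove this gives num_of_actions = 6; an 18B
+ * header (with VLAN) gives 7.
+ */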
+
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ u32 arg_id, pat_id;
+ int num_of_actions;
+ int mh_data_size;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+ hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+ mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Create a full modify header action list in case the action is shared */
+ hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+ /* All DecapL3 cases require the same max arg size */
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ (__be64 *)mh_data,
+ num_of_actions,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+ hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+ mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.max_num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_patterns = num_of_hdrs;
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 bulk_size)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ ret = hws_action_create_stcs(action, 0);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
+ mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+ goto free_action;
+ }
+
+ ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *pattern,
+ u32 log_bulk_size)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ u16 num_actions, max_mh_actions = 0;
+ int i, ret, size_in_bytes;
+ u32 pat_id, arg_id = 0;
+ __be64 *new_pattern;
+ size_t pat_max_sz;
+
+ pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ size_in_bytes = pat_max_sz * sizeof(__be64);
+ new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+ if (!new_pattern)
+ return -ENOMEM;
+
+ /* Calculate maximum number of mh actions for shared arg allocation */
+ for (i = 0; i < num_of_patterns; i++) {
+ size_t new_num_actions;
+ size_t cur_num_actions;
+ u32 nope_location;
+
+ cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+ mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
+ pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
+ &new_num_actions, &nope_location,
+ &new_pattern[i * pat_max_sz]);
+
+ action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.num_of_actions = new_num_actions;
+
+ max_mh_actions = max(max_mh_actions, new_num_actions);
+ }
+
+ if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+ max_mh_actions);
+ ret = -EINVAL;
+ goto free_new_pat;
+ }
+
+ /* Allocate single shared arg for all patterns based on the max size */
+ if (max_mh_actions > 1) {
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ pattern->data,
+ max_mh_actions,
+ log_bulk_size,
+ action->flags &
+ MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ goto free_new_pat;
+ }
+
+ for (i = 0; i < num_of_patterns; i++) {
+ if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+ mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+ ret = -EINVAL;
+ goto free_stc_and_pat;
+ }
+ num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ action[i].modify_header.num_of_patterns = num_of_patterns;
+ action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+ if (num_actions == 1) {
+ pat_id = 0;
+ /* Optimize single modify action to be used inline */
+ action[i].modify_header.single_action = pattern[i].data[0];
+ action[i].modify_header.single_action_type =
+ MLX5_GET(set_action_in, pattern[i].data, action_type);
+ } else {
+ /* Multiple modify actions require a pattern */
+ if (unlikely(action[i].modify_header.nope_locations)) {
+ size_t pattern_sz;
+
+ pattern_sz = action[i].modify_header.num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE;
+ ret =
+ mlx5hws_pat_get_pattern(ctx,
+ &new_pattern[i * pat_max_sz],
+ pattern_sz, &pat_id);
+ } else {
+ ret = mlx5hws_pat_get_pattern(ctx,
+ pattern[i].data,
+ pattern[i].sz,
+ &pat_id);
+ }
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate pattern for modify header\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ }
+ /* Allocate STC for each action representing a header */
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ if (pat_id)
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ kfree(new_pattern);
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.pat_id)
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ if (arg_id)
+ mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+ kfree(new_pattern);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_patterns) {
+ mlx5hws_err(ctx, "Invalid number of patterns\n");
+ return NULL;
+ }
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ num_of_patterns);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
+ mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_modify_header_hws(action,
+ num_of_patterns,
+ patterns,
+ log_bulk_size);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags)
+{
+ struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_action *action;
+ u32 i;
+ int ret;
+
+ if (num_dest <= 1) {
+ mlx5hws_err(ctx, "Action must have multiple dests\n");
+ return NULL;
+ }
+
+ if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ } else {
+ mlx5hws_err(ctx, "Action flags not supported\n");
+ return NULL;
+ }
+
+ dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+ if (!dest_list)
+ return NULL;
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5hws_action_type action_type = dests[i].dest->type;
+ struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+ switch (action_type) {
+ case MLX5HWS_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = ignore_flow_level;
+ /* ToDo: In SW steering, a 'go to WIRE' destination is handled
+ * here by the upper layer setting the 'is_wire_ft' flag when
+ * the destination is wire, since the uplink must be the last
+ * dest in the list.
+ */
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ default:
+ mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+ goto free_dest_list;
+ }
+
+ if (reformat_action) {
+ mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+ goto free_dest_list;
+ }
+ }
+
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+ dest_list[i].ext_reformat_id);
+ }
+ kfree(dest_list);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action_reformat_header *reformat_hdrs;
+ struct mlx5hws_action *action;
+ int ret;
+ int i;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Insert header num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+ if (!reformat_hdrs)
+ goto free_action;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].offset % W_SIZE != 0) {
+ mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+ goto free_reformat_hdrs;
+ }
+
+ action[i].reformat.anchor = hdrs[i].anchor;
+ action[i].reformat.encap = hdrs[i].encap;
+ action[i].reformat.offset = hdrs[i].offset;
+
+ reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+ reformat_hdrs[i].data = hdrs[i].hdr.data;
+ }
+
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+ reformat_hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_reformat_hdrs;
+ }
+
+ kfree(reformat_hdrs);
+
+ return action;
+
+free_reformat_hdrs:
+ kfree(reformat_hdrs);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+ if (!action)
+ return NULL;
+
+ /* Only header removal by anchor with an explicit size is supported */
+ if (attr->size % W_SIZE != 0) {
+ mlx5hws_err(ctx,
+ "Invalid size, HW supports header remove in WORD granularity\n");
+ goto free_action;
+ }
+
+ if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+ mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+ MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+ goto free_action;
+ }
+
+ action->remove_header.anchor = attr->anchor;
+ action->remove_header.size = attr->size / W_SIZE;
+
+ if (hws_action_create_stcs(action, 0))
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
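+
+/* Illustrative example (assuming W_SIZE is a 2-byte word): a
+ * remove-header request of attr->size = 8 bytes passes the granularity
+ * check and is stored as action->remove_header.size = 4 words, matching
+ * the REMOVE_WORDS units used in hws_action_fill_stc_attr().
+ */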
+
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_definer *definer;
+ __be32 *tag;
+ int ret;
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+ /* Set DW0 tag mask */
+ tag = (__force __be32 *)definer->mask.jumbo;
+ tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0) {
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(definer);
+ return NULL;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+ definer->obj_id = ret;
+
+ return definer;
+}
+
+static struct mlx5hws_matcher_action_ste *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ /* Check if STE range is supported */
+ if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+ mlx5hws_err(ctx, "Range STE format not supported\n");
+ return NULL;
+ }
+
+ table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+ if (!table_ste)
+ return NULL;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = 1;
+ table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!table_ste->pool) {
+ mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+ goto free_ste;
+ }
+
+ /* Allocate RTC */
+ rtc_0_id = &table_ste->rtc_0_id;
+ rtc_1_id = &table_ste->rtc_1_id;
+ ste_pool = table_ste->pool;
+ ste = &table_ste->ste;
+ ste->order = 1;
+
+ rtc_attr.log_size = 0;
+ rtc_attr.log_depth = 0;
+ rtc_attr.miss_ft_id = miss_ft_id;
+ rtc_attr.num_hash_definer = 1;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.fw_gen_wqe = true;
+ rtc_attr.is_scnd_range = true;
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
+ default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create RTC");
+ goto pool_destroy;
+ }
+
+ /* Create mirror RTC */
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror RTC");
+ goto destroy_rtc_0;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return table_ste;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+ mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(table_ste);
+ return NULL;
+}
+
+static void
+hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+ mlx5hws_pool_destroy(table_ste->pool);
+ kfree(table_ste);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static int
+hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer,
+ u32 min, u32 max)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ u32 no_use, used_rtc_0_id, used_rtc_1_id;
+ struct mlx5hws_context_common_res *common_res;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ __be32 *wqe_data_arr;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Init default send STE attributes */
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.user_data = &no_use;
+ ste_attr.send_attr.rule = NULL;
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = true;
+ ste_attr.rtc_0 = table_ste->rtc_0_id;
+ ste_attr.rtc_1 = table_ste->rtc_1_id;
+ ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+ ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+ common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+
+ /* Init an empty match STE which always hits */
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe_data;
+ ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+ /* Fill WQE control data */
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(common_res->default_stc->nop_ctr.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(common_res->default_stc->nop_dw5.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(common_res->default_stc->nop_dw6.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(common_res->default_stc->nop_dw7.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+ wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+ ste_attr.range_wqe_data = &range_wqe_data;
+ ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+ /* Fill the range matching fields:
+ * min/max_value_2 correspond to match_dw_0 in the definer;
+ * min_value_2 is set in DW0 of the STE and max_value_2 in DW1.
+ */
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+ /* Send WQEs to FW */
+ mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to drain control queue");
+ goto error;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_action *action;
+ u32 miss_ft_id = miss_ft->id;
+ u32 hit_ft_id = hit_ft->id;
+ int ret;
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5hws_err(ctx, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ definer = hws_action_create_dest_match_range_definer(ctx);
+ if (!definer)
+ goto free_action;
+
+ table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+ if (!table_ste)
+ goto destroy_definer;
+
+ hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+ if (!hit_ft_action)
+ goto destroy_table_ste;
+
+ ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+ hit_ft_action,
+ definer, min, max);
+ if (ret)
+ goto destroy_hit_ft_action;
+
+ action->range.table_ste = table_ste;
+ action->range.definer = definer;
+ action->range.hit_ft_action = hit_ft_action;
+
+ /* Allocate STC for jumps to STE */
+ mutex_lock(&ctx->ctrl_lock);
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = table_ste->ste;
+ stc_attr.ste_table.ste_pool = table_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return action;
+
+error_unlock:
+ mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+ mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+ hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+ mlx5hws_definer_free(ctx, definer);
+free_action:
+ kfree(action);
+ mlx5hws_err(ctx, "Failed to create action dest match range");
+ return NULL;
+}
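+
+/* Usage sketch (illustrative; "ctx", "hit_ft" and "miss_ft" stand for
+ * existing objects): send packets whose outer packet length falls
+ * within [64, 1500] to hit_ft and all others to miss_ft:
+ *
+ *	action = mlx5hws_action_create_dest_match_range(ctx,
+ *			MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
+ *			hit_ft, miss_ft, 64, 1500,
+ *			MLX5HWS_ACTION_FLAG_HWS_FDB);
+ */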
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+ return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags)
+{
+ mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ return NULL;
+}
+
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+ u32 ext_reformat_id;
+ bool shared_arg;
+ u32 obj_id;
+ u32 i;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_MISS:
+ case MLX5HWS_ACTION_TYP_TAG:
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_CTR:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ hws_action_destroy_stcs(action);
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ hws_action_destroy_stcs(action);
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ break;
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+ if (ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+ ext_reformat_id);
+ }
+ kfree(action->dest_array.dest_list);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ shared_arg = false;
+ for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.num_of_actions > 1) {
+ mlx5hws_pat_put_pattern(action[i].ctx,
+ action[i].modify_header.pat_id);
+ /* Save shared arg object to be freed after */
+ obj_id = action[i].modify_header.arg_id;
+ shared_arg = true;
+ }
+ }
+ if (shared_arg)
+ mlx5hws_arg_destroy(action->ctx, obj_id);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_RANGE:
+ hws_action_destroy_stcs(action);
+ hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+ mlx5hws_definer_free(action->ctx, action->range.definer);
+ mlx5hws_action_destroy(action->range.hit_ft_action);
+ break;
+ case MLX5HWS_ACTION_TYP_LAST:
+ break;
+ default:
+ pr_warn("HWS: Invalid action type: %d\n", action->type);
+ }
+}
+
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+ hws_action_destroy_hws(action);
+
+ kfree(action);
+ return 0;
+}
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ int ret;
+
+ if (ctx->common_res[tbl_type].default_stc) {
+ ctx->common_res[tbl_type].default_stc->refcount++;
+ return 0;
+ }
+
+ default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+ if (!default_stc)
+ return -ENOMEM;
+
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_ctr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+ goto free_default_stc;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw5);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+ goto free_nop_ctr;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw6);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+ goto free_nop_dw5;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw7);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+ goto free_nop_dw6;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->default_hit);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+ goto free_nop_dw7;
+ }
+
+ ctx->common_res[tbl_type].default_stc = default_stc;
+ ctx->common_res[tbl_type].default_stc->refcount++;
+
+ return 0;
+
+free_nop_dw7:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+ kfree(default_stc);
+ return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_action_default_stc *default_stc;
+
+ default_stc = ctx->common_res[tbl_type].default_stc;
+ if (--default_stc->refcount)
+ return;
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+ kfree(default_stc);
+ ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions,
+ u32 nope_locations)
+{
+ u8 *new_arg_data = NULL;
+ int i, j;
+
+ if (unlikely(nope_locations)) {
+ new_arg_data = kcalloc(num_of_actions,
+ MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (unlikely(!new_arg_data))
+ return;
+
+ for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+ memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
+ if (BIT(i) & nope_locations)
+ j++;
+ }
+ }
+
+ mlx5hws_arg_write(queue, NULL, arg_idx,
+ new_arg_data ? new_arg_data : arg_data,
+ num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+ kfree(new_arg_data);
+}
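+
+/* Illustrative example: with nope_locations = BIT(1), the loop above
+ * copies action 0 to slot 0 and action 1 to slot 1, then advances j an
+ * extra step so slot 2 is left zeroed by kcalloc() as a NOP and the
+ * next action lands in slot 3.
+ */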
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+ u8 *e_src;
+ int i;
+
+ /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
+ * We copy from the end of src to the start of dst, so first move
+ * e_src to the end of the header; the 2B leftover of the 14B or 18B
+ * header is handled last.
+ */
+ if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+ else
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+ /* Move dst over the first remove action + zero data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ /* Move dst over the first insert ctrl action */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+ /* Actions:
+ * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * the loop is without the last insertion.
+ */
+ for (i = 0; i < num_of_actions - 3; i++) {
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+ memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ }
+ /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ memcpy(dst, e_src, 2);
+}
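+
+/* Illustrative walk-through for the no-VLAN case (num_of_actions = 6,
+ * 14B header): e_src starts at src + 14 and steps back 4B per copy, so
+ * the three loop iterations place header bytes 10-13, 6-9 and 2-5 into
+ * consecutive inline insert actions; the final memcpy places bytes 0-1
+ * after the 2B gap that the trailing remove-words action strips.
+ */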
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+ u8 req_flags)
+{
+ /* Use a new setter if requested flags are taken */
+ while (setter->flags & req_flags)
+ setter++;
+
+ /* Use the current setter if the required flags are not taken */
+ return setter;
+}
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+ enum mlx5hws_action_stc_idx stc_idx,
+ u8 action_idx)
+{
+ struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+ apply->wqe_ctrl->stc_ix[stc_idx] =
+ htonl(action->stc[apply->tbl_type].offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ u8 *single_action;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action;
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+ if (action->modify_header.num_of_actions == 1) {
+ if (action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_COPY ||
+ action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ return;
+ }
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ single_action = (u8 *)&action->modify_header.single_action;
+ else
+ single_action = rule_action->modify_header.data;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+ *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+ } else {
+ /* Scale the argument offset by the number of args used per action */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nope_locations);
+ }
+ }
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_idx, arg_sz;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* Scale the argument offset by the number of args required for the header size */
+ arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_write(apply->queue, NULL,
+ action->reformat.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->reformat.header_size);
+ }
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* Scale the argument offset by the number of args required for the actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_decapl3_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->modify_header.num_of_actions);
+ }
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ u32 exe_aso_ctrl;
+ u32 offset;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+
+ switch (rule_action->action->type) {
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* exe_aso_ctrl format:
+ * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+ */
+ offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+ MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+ break;
+ default:
+ mlx5hws_err(rule_action->action->ctx,
+ "Unsupported ASO action type: %d\n", rule_action->action->type);
+ return;
+ }
+
+ /* aso_object_offset format: [24 bits] */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
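+
+/* Worked example (illustrative, assuming MLX5_ASO_METER_NUM_PER_OBJ is
+ * 2): an aso_meter.offset of 5 yields an object offset of 5 / 2 = 2 in
+ * DW6 and exe_aso_ctrl = (5 % 2) |
+ * (init_color << MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET) in DW7.
+ */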
+
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_single];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_ctr];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ /* Always jump to index zero */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+ enum mlx5hws_action_type *action_type = at->action_type_arr;
+ struct mlx5hws_actions_wqe_setter *setter = at->setters;
+ struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+ struct mlx5hws_actions_wqe_setter *last_setter;
+ int i;
+
+ /* Note: Given action combination must be valid */
+
+ /* Check if actions were already processed */
+ if (at->num_of_action_stes)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+ setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+ /* The same action template setters can be used with either a jumbo
+ * or a match STE. To support both cases we reserve the first setter
+ * for the jumbo STE case, allowing a jump to the first action STE.
+ * This extra setter can be elided in some cases on rule creation.
+ */
+ setter = start_setter;
+ last_setter = start_setter;
+
+ for (i = 0; i < at->num_actions; i++) {
+ switch (action_type[i]) {
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ case MLX5HWS_ACTION_TYP_MISS:
+ /* Hit action */
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_hit;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_RANGE:
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_range;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ /* Single remove header to header */
+ if (pop_setter) {
+ /* We have 2 pops, use the shared */
+ pop_setter->set_single = &hws_action_setter_single_double_pop;
+ break;
+ }
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY |
+ ASF_INSERT);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ pop_setter = setter;
+ break;
+
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ /* Double insert inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_push_vlan;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ /* Double modify header list */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+ setter->set_double = &hws_action_setter_modify_header;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* Double ASO action */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+ setter->flags |= ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_aso;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ /* Single remove header to header */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ /* Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ /* Single remove + Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_DOUBLE);
+ setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ setter->set_single = &hws_action_setter_common_decap;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ /* Double modify header list with remove and push inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+ setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_TAG:
+ /* Single TAG action, search for any room from the start */
+ setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+ setter->flags |= ASF_SINGLE1;
+ setter->set_single = &hws_action_setter_tag;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_CTR:
+ /* Control counter action.
+ * TODO: Currently the counter is executed first. Support is
+ * needed for a single-action counter that is executed last,
+ * e.g. Decap + CTR.
+ */
+ setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+ setter->flags |= ASF_CTR;
+ setter->set_ctr = &hws_action_setter_ctrl_ctr;
+ setter->idx_ctr = i;
+ break;
+ default:
+ pr_warn("HWS: Invalid action type in processingaction template: action_type[%d]=%d\n",
+ i, action_type[i]);
+ return -EOPNOTSUPP;
+ }
+
+ last_setter = max(setter, last_setter);
+ }
+
+ /* Set default hit on the last STE if no hit action provided */
+ if (!(last_setter->flags & ASF_HIT))
+ last_setter->set_hit = &hws_action_setter_default_hit;
+
+ at->num_of_action_stes = last_setter - start_setter + 1;
+
+ /* Check if action template doesn't require any action DWs */
+ at->only_term = (at->num_of_action_stes == 1) &&
+ !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+ return 0;
+}
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+ struct mlx5hws_action_template *at;
+ u8 num_actions = 0;
+ int i;
+
+ at = kzalloc(sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return NULL;
+
+ while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+ ;
+
+ at->num_actions = num_actions - 1;
+ at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+ if (!at->action_type_arr)
+ goto free_at;
+
+ for (i = 0; i < num_actions; i++)
+ at->action_type_arr[i] = action_type[i];
+
+ return at;
+
+free_at:
+ kfree(at);
+ return NULL;
+}
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+ kfree(at->action_type_arr);
+ kfree(at);
+ return 0;
+}
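+
+/* Illustrative usage sketch (hypothetical caller, not part of this file):
+ * an action template is created from a MLX5HWS_ACTION_TYP_LAST-terminated
+ * array and must then be processed before use:
+ *
+ *     enum mlx5hws_action_type types[] = {
+ *             MLX5HWS_ACTION_TYP_CTR,
+ *             MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ *             MLX5HWS_ACTION_TYP_TBL,
+ *             MLX5HWS_ACTION_TYP_LAST,
+ *     };
+ *     struct mlx5hws_action_template *at;
+ *     int ret;
+ *
+ *     at = mlx5hws_action_template_create(types);
+ *     if (!at)
+ *             return -ENOMEM;
+ *     ret = mlx5hws_action_template_process(at);
+ *     ...
+ *     mlx5hws_action_template_destroy(at);
+ */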
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
new file mode 100644
index 000000000000..bf5c1b241006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_ACTION_H_
+#define MLX5HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+ MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+ MLX5HWS_ACTION_STC_IDX_HIT = 1,
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+ /* STC Jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+ /* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
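+
+/* Taken together with the STC indexes above, these offsets describe the
+ * action STE dword layout used by the setters: DW0 carries the control /
+ * counter data, DW3-DW4 carry the hit (next table) address with its LSB
+ * in DW4, DW5 carries a single-size action argument, and DW6-DW7 carry a
+ * double-size action argument.
+ */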
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
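+
+/* For example, since ASF_DOUBLE is ASF_SINGLE2 | ASF_SINGLE3, a double
+ * action occupies the DW6/DW7 slots and can share an STE with a single
+ * ASF_SINGLE1 action in DW5, while ASF_TRIPLE occupies all three slots.
+ */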
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nope_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+ struct {
+ u32 obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ bool esw_owner_vhca_id_valid;
+ } vport;
+ struct {
+ u32 obj_id;
+ } dest_obj;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5hws_cmd_set_fte_dest *dest_list;
+ } dest_array;
+ struct {
+ u8 type;
+ u8 start_anchor;
+ u8 end_anchor;
+ u8 num_of_words;
+ bool decap;
+ } insert_hdr;
+ struct {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+ } remove_header;
+ struct {
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ } range;
+ };
+ };
+
+ struct ibv_flow_action *flow_action;
+ u32 obj_id;
+ struct ibv_qp *qp;
+ };
+};
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+ u16 num_of_actions);
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type);
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(apply->common_res->default_stc->nop_dw6.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter,
+ bool is_jumbo)
+{
+ u8 num_of_actions;
+
+ /* Set control counter */
+ if (setter->set_ctr)
+ setter->set_ctr(apply, setter);
+ else
+ mlx5hws_action_setter_default_ctr(apply, setter);
+
+ if (!is_jumbo) {
+ if (unlikely(setter->set_triple)) {
+ /* Set triple on match */
+ setter->set_triple(apply, setter);
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+ } else {
+ /* Set single and double on match */
+ if (setter->set_single)
+ setter->set_single(apply, setter);
+ else
+ mlx5hws_action_setter_default_single(apply, setter);
+
+ if (setter->set_double)
+ setter->set_double(apply, setter);
+ else
+ mlx5hws_action_setter_default_double(apply, setter);
+
+ num_of_actions = setter->set_double ?
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+ }
+ } else {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+ }
+
+ /* Set next/final hit action */
+ setter->set_hit(apply, setter);
+
+ /* Set number of actions */
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(num_of_actions << 29);
+}
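+
+/* A worked example of the encoding above: a match STE that carries both a
+ * single and a double action uses MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 (3),
+ * so the control dword carries the value 3 in its top three bits
+ * (3 << 29) in addition to the CTRL STC index.
+ */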
+
+#endif /* MLX5HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
new file mode 100644
index 000000000000..e6ed66202a40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+ int i, s, ret = 0;
+
+ buddy->max_order = max_order;
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ if (!buddy->bitmap)
+ return -ENOMEM;
+
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+ if (!buddy->num_free) {
+ ret = -ENOMEM;
+ goto err_out_free_bits;
+ }
+
+ for (i = 0; i <= (int)buddy->max_order; ++i) {
+ s = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+ if (!buddy->bitmap[i]) {
+ ret = -ENOMEM;
+ goto err_out_free_num_free;
+ }
+ }
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_num_free:
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+
+err_out_free_bits:
+ kfree(buddy->bitmap);
+ return ret;
+}
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ return NULL;
+
+ if (hws_buddy_init(buddy, max_order))
+ goto free_buddy;
+
+ return buddy;
+
+free_buddy:
+ kfree(buddy);
+ return NULL;
+}
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+ int i;
+
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+ u32 start_order,
+ u32 *segment,
+ u32 *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+ u32 seg, order_iter, err;
+
+ err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+
+ return seg;
+}
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+ seg >>= order;
+
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ bitmap_set(buddy->bitmap[order], seg, 1);
+ ++buddy->num_free[order];
+}
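+
+/* A worked example of the scheme above, assuming a fresh buddy created
+ * with max_order = 2 (one free order-2 segment covering offsets 0-3):
+ *
+ *     seg = mlx5hws_buddy_alloc_mem(buddy, 0);
+ *
+ * splits the order-2 segment twice and returns seg = 0, leaving segment 1
+ * free at order 0 (offset 1) and segment 1 free at order 1 (offsets 2-3).
+ * Freeing it with
+ *
+ *     mlx5hws_buddy_free_mem(buddy, 0, 0);
+ *
+ * re-merges with the free buddy (seg ^ 1) at each order and restores the
+ * single free order-2 segment.
+ */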
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
new file mode 100644
index 000000000000..338c44bbedaf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BUDDY_H_
+#define MLX5HWS_BUDDY_H_
+
+struct mlx5hws_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+};
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
+
+#endif /* MLX5HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
new file mode 100644
index 000000000000..bd52b05db367
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+ /* assign random queue */
+ return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+ return min(ctx->send_queue[queue_id].num_entries / 2,
+ MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+ return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_lock(queue_lock);
+ }
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i = bwc_queues;
+
+ while (i--) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_unlock(queue_lock);
+ }
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+ u32 priority,
+ u8 size_log)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->priority = priority;
+ attr->optimize_using_rule_idx = 0;
+ attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+ attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+ attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+ attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+ attr->rule.num_log = size_log;
+ attr->resizable = true;
+ attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[])
+{
+ enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+ struct mlx5hws_context *ctx = table->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_matcher_attr attr = {0};
+ int i;
+
+ bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+ if (!bwc_matcher->rules)
+ goto err;
+
+ for (i = 0; i < bwc_queues; i++)
+ INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+ hws_bwc_matcher_init_attr(&attr,
+ priority,
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+ bwc_matcher->priority = priority;
+ bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ /* create dummy action template */
+ bwc_matcher->at[0] =
+ mlx5hws_action_template_create(action_types ?
+ action_types : init_action_types);
+ if (!bwc_matcher->at[0]) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+ goto free_bwc_matcher_rules;
+ }
+
+ bwc_matcher->num_of_at = 1;
+
+ bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!bwc_matcher->mt) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+ goto free_at;
+ }
+
+ bwc_matcher->matcher = mlx5hws_matcher_create(table,
+ &bwc_matcher->mt, 1,
+ &bwc_matcher->at[0],
+ bwc_matcher->num_of_at,
+ &attr);
+ if (!bwc_matcher->matcher) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+ goto free_mt;
+ }
+
+ return 0;
+
+free_mt:
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+ mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+ kfree(bwc_matcher->rules);
+err:
+ return -EINVAL;
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ bool is_complex;
+ int ret;
+
+ if (!mlx5hws_context_bwc_supported(table->ctx)) {
+ mlx5hws_err(table->ctx,
+ "BWC matcher: context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!bwc_matcher)
+ return NULL;
+
+ /* Check if all the required match params can be matched
+ * in a single STE; otherwise a complex matcher is needed.
+ */
+
+ is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+ if (is_complex)
+ ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask);
+ else
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret)
+ goto free_bwc_matcher;
+
+ return bwc_matcher;
+
+free_bwc_matcher:
+ kfree(bwc_matcher);
+
+ return NULL;
+}
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ mlx5hws_matcher_destroy(bwc_matcher->matcher);
+ bwc_matcher->matcher = NULL;
+
+ for (i = 0; i < bwc_matcher->num_of_at; i++)
+ mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+ kfree(bwc_matcher->rules);
+
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ if (bwc_matcher->num_of_rules)
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "BWC matcher destroy: matcher still has %d rules\n",
+ bwc_matcher->num_of_rules);
+
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+ kfree(bwc_matcher);
+ return 0;
+}
+
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
+{
+ struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+ u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+ bool got_comp = *pending_rules >= burst_th;
+ bool queue_full;
+ int err = 0;
+ int ret;
+ int i;
+
+ /* Check if there are any completions at all */
+ if (!got_comp && !drain)
+ return 0;
+
+ queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+ while (queue_full || ((got_comp || drain) && *pending_rules)) {
+ ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+ if (unlikely(ret < 0)) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+ queue_id, ret);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ (*pending_rules) -= ret;
+ for (i = 0; i < ret; i++) {
+ if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+ mlx5hws_err(ctx,
+ "BWC poll error: polling queue %d returned completion with error\n",
+ queue_id);
+ err = -EINVAL;
+ }
+ }
+ queue_full = false;
+ }
+
+ got_comp = !!ret;
+ }
+
+ return err;
+}
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ /* no use of INSERT_BY_INDEX in bwc rule */
+ rule_attr->rule_idx = 0;
+
+ /* notify HW at each rule insertion/deletion */
+ rule_attr->burst = 0;
+
+ /* We don't need user data, but the API requires it to exist */
+ rule_attr->user_data = (void *)0xFACADE;
+
+ rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+ rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_rule *bwc_rule;
+
+ bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule))
+ goto out_err;
+
+ bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule->rule))
+ goto free_rule;
+
+ bwc_rule->bwc_matcher = bwc_matcher;
+ return bwc_rule;
+
+free_rule:
+ kfree(bwc_rule);
+out_err:
+ return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ if (likely(bwc_rule->rule))
+ kfree(bwc_rule->rule);
+ kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules++;
+ bwc_rule->bwc_queue_idx = idx;
+ list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules--;
+ list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_flow_op_result completion;
+ int ret;
+
+ ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ do {
+ ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+ } while (ret != 1);
+
+ if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+ (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+ completion.status, bwc_rule->rule->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 idx = bwc_rule->bwc_queue_idx;
+ struct mlx5hws_rule_attr attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ hws_bwc_rule_list_remove(bwc_rule);
+
+ mutex_unlock(queue_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ int ret;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+ 0, /* only one match template supported */
+ match_param,
+ at_idx,
+ rule_actions,
+ rule_attr,
+ bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+ at_idx, rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+
+ return ret;
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = mlx5hws_rule_action_update(bwc_rule->rule,
+ at_idx,
+ rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+ return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+ return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
+ caps->ste_alloc_log_max - 1;
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u32 num_of_rules)
+{
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ return false;
+
+ if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+ (1UL << bwc_matcher->size_log)))
+ return true;
+
+ return false;
+}
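+
+/* A worked example of the threshold above: with size_log = 10 (1024 rule
+ * slots) and MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH = 70, the condition
+ * num_of_rules * 100 / 70 >= 1024 first holds at num_of_rules = 717,
+ * i.e. once the matcher is roughly 70% full.
+ */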
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+ enum mlx5hws_action_type action_types[])
+{
+ int i = 0;
+
+ for (i = 0;
+ rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+ i++) {
+ action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+ }
+
+ action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+
+ hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+ bwc_matcher->at[bwc_matcher->num_of_at] =
+ mlx5hws_action_template_create(action_types);
+
+ if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+ return -ENOMEM;
+
+ bwc_matcher->num_of_at++;
+ return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -ENOMEM;
+ }
+
+ bwc_matcher->size_log =
+ min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type *action_type_arr;
+ int i, j;
+
+ /* start from index 1 - first action template is a dummy */
+ for (i = 1; i < bwc_matcher->num_of_at; i++) {
+ j = 0;
+ action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+ while (rule_actions[j].action &&
+ rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+ if (action_type_arr[j] != rule_actions[j].action->type)
+ break;
+ j++;
+ }
+
+ if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+ (!rule_actions[j].action ||
+ rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+ return i;
+ }
+
+ return -1;
+}
+
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule **bwc_rules;
+ struct mlx5hws_rule_attr rule_attr;
+ u32 *pending_rules;
+ int i, j, ret = 0;
+ bool all_done;
+ u16 burst_th;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+ if (!pending_rules)
+ return -ENOMEM;
+
+ bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+ if (!bwc_rules) {
+ ret = -ENOMEM;
+ goto free_pending_rules;
+ }
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ bwc_rules[i] = NULL;
+ else
+ bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+ struct mlx5hws_bwc_rule,
+ list_node);
+ }
+
+ do {
+ all_done = true;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+ for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+ rule_attr.burst = !!((j + 1) % burst_th);
+ ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+ bwc_rules[i]->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
+ goto free_bwc_rules;
+ }
+
+ all_done = false;
+ pending_rules[i]++;
+ bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+ &bwc_matcher->rules[i]) ?
+ NULL : list_next_entry(bwc_rules[i], list_node);
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+ &pending_rules[i], false);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+ } while (!all_done);
+
+ /* drain all the bwc queues */
+ for (i = 0; i < bwc_queues; i++) {
+ if (pending_rules[i]) {
+ u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+ ret = hws_bwc_queue_poll(ctx, queue_id,
+ &pending_rules[i], true);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+
+free_bwc_rules:
+ kfree(bwc_rules);
+free_pending_rules:
+ kfree(pending_rules);
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(&matcher_attr,
+ bwc_matcher->priority,
+ bwc_matcher->size_log);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+ return -ENOMEM;
+ }
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ u32 num_of_rules;
+ int ret;
+
+ /* If the current matcher is already at its maximum size, we can't
+ * do the rehash. Skip it and try adding the rule again - perhaps
+ * there was some change.
+ */
+ if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+ return 0;
+
+ /* It is possible that another rule has already performed the rehash.
+ * Check again whether rehash is really needed: if the matcher no
+ * longer exceeds the size threshold - skip the rehash.
+ */
+ num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+ return 0;
+
+ /* Now we're done with all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+
+ ret = hws_bwc_matcher_extend_size(bwc_matcher);
+ if (ret)
+ return ret;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* Rehash by action template doesn't require any additional checking.
+ * The bwc_matcher already contains the new action template.
+ * Just do the usual rehash:
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ u32 num_of_rules;
+ int ret = 0;
+ int at_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ return ret;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash AT failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ /* check if number of rules require rehash */
+ num_of_rules = bwc_matcher->num_of_rules;
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+ bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ bwc_matcher->size_log,
+ ret);
+ return ret;
+ }
+
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ if (likely(!ret)) {
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ return 0; /* rule inserted successfully */
+ }
+
+ /* At this point the rule wasn't added.
+ * It could be because there was a collision, or some other problem.
+ * Without diving deeper than the API, the only thing we know is that
+ * the completion status is not MLX5HWS_FLOW_OP_SUCCESS.
+ * Try rehash by size and insert the rule again - last chance.
+ */
+
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Rehash done, but we still have that pesky rule to add */
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ return ret;
+ }
+
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ u16 bwc_queue_idx;
+ int ret;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+ if (unlikely(!bwc_rule))
+ return NULL;
+
+ bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ if (unlikely(ret)) {
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return NULL;
+ }
+
+ return bwc_rule;
+}
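+
+/* Illustrative BWC call sequence (hypothetical caller, error handling
+ * elided):
+ *
+ *     struct mlx5hws_bwc_matcher *bwc_matcher;
+ *     struct mlx5hws_bwc_rule *bwc_rule;
+ *
+ *     bwc_matcher = mlx5hws_bwc_matcher_create(table, priority,
+ *                                              match_criteria_enable, &mask);
+ *     bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params,
+ *                                        flow_source, rule_actions);
+ *     ...
+ *     mlx5hws_bwc_rule_destroy(bwc_rule);
+ *     mlx5hws_bwc_matcher_destroy(bwc_matcher);
+ */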
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+ u16 idx;
+
+ idx = bwc_rule->bwc_queue_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx < 0)) {
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
+ return -EINVAL;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule update: rehash AT failed (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_update_sync(bwc_rule,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ mutex_unlock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return -EINVAL;
+ }
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
new file mode 100644
index 000000000000..4fe8c32d8fbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_H_
+#define MLX5HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+struct mlx5hws_bwc_matcher {
+ struct mlx5hws_matcher *matcher;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u8 num_of_at;
+ u16 priority;
+ u8 size_log;
+ u32 num_of_rules; /* atomically accessed */
+ struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct mlx5hws_rule *rule;
+ u16 bwc_queue_idx;
+ struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+ /* Besides the control queue, half of the queues are
+ * regular HWS queues, and the other half are BWC queues.
+ */
+ return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+ return idx + mlx5hws_bwc_queues(ctx);
+}
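+
+/* For example, with ctx->queues = 17 there are 8 BWC queues: BWC indexes
+ * 0..7 map to send queue ids 8..15, leaving queue ids 0..7 for regular
+ * HWS use and the one remaining queue for control.
+ */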
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..bb563f50ef09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ /* The only case that we're interested in is E2BIG,
+ * which means that the match parameters need to be
+ * split into a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual matcher creation path handle it.
+ */
+ if (ret == -E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+ "Complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "Moving complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
new file mode 100644
index 000000000000..2c7b14172049
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ default:
+ pr_warn("HWS: unknown flow dest type %d\n", type);
+ return 0;
+ }
+}
+
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+ u32 object_type,
+ u32 object_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+ MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+ MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ void *ft_ctx;
+
+ MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+ MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+ ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+ return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+ MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+ *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+ *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+ return ret;
+}
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
+}
+
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_fg_attr *fg_attr,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int ret;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+ if (ret)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kvfree(in);
+ return ret;
+}
+
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+ u32 ft_id, u32 fg_id, u8 ft_type)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
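+/*
+ * Illustrative caller sketch (not part of this patch; the table, group and
+ * next_ft_id values are placeholders) for a single forward-to-flow-table
+ * destination:
+ *
+ *	struct mlx5hws_cmd_set_fte_dest dest = {
+ *		.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ *		.destination_id = next_ft_id,
+ *	};
+ *	struct mlx5hws_cmd_set_fte_attr fte_attr = {
+ *		.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ *		.dests_num = 1,
+ *		.dests = &dest,
+ *	};
+ *
+ *	ret = mlx5hws_cmd_set_fte(mdev, table_type, table_id, group_id, &fte_attr);
+ */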
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ void *in_flow_context;
+ u32 dest_entry_sz;
+ u32 total_dest_sz;
+ u32 action_flags;
+ u8 *in_dests;
+ u32 inlen;
+ u32 *in;
+ int ret;
+ u32 i;
+
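+ /* The command input is the fixed set_fte layout followed by one
+ * destination entry per dest; extended destinations use the larger
+ * extended_dest_format layout.
+ */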
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+ }
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+ enum mlx5_ifc_flow_destination_type ifc_dest_type =
+ hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ fallthrough;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat_id);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ in_dests += dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
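+/* Create a one-rule forwarding table: a flow table holding a single
+ * catch-all flow group with one FTE, used for example as the default
+ * miss target that other tables are connected to.
+ */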
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ struct mlx5hws_cmd_fg_attr fg_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *tbl;
+ int ret;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FT\n");
+ goto free_tbl;
+ }
+
+ fg_attr.table_id = tbl->ft_id;
+ fg_attr.table_type = ft_attr->type;
+
+ ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FG\n");
+ goto free_ft;
+ }
+
+ ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+ tbl->ft_id, tbl->fg_id, fte_attr);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FTE\n");
+ goto free_fg;
+ }
+
+ tbl->type = ft_attr->type;
+ return tbl;
+
+free_fg:
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl)
+{
+ mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+ mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+ kfree(tbl);
+}
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+ u32 default_miss_tbl;
+
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr->type = fw_ft_type;
+ ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+ default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ if (!default_miss_tbl) {
+ pr_warn("HWS: no flow table ID for default miss\n");
+ return;
+ }
+
+ ft_attr->table_miss_id = default_miss_tbl;
+}
+
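+/* Create an RTC (rule table context) general object. The RTC anchors a
+ * range of match STEs and describes how rules are inserted and looked up
+ * in it: update/access index modes, match definers, log depth and size.
+ */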
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+ MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+ MLX5_IFC_RTC_STE_FORMAT_11DW :
+ MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+ if (rtc_attr->is_scnd_range) {
+ MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+ MLX5_SET(rtc, attr, num_match_ste, 2);
+ }
+
+ MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+ MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+ MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+ MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+ MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+ MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+ MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+ MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+ MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+ MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+ MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
+ MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+ MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create RTC\n");
+ goto out;
+ }
+
+ *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STC\n");
+ goto out;
+ }
+
+ *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
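+/* Fill the action-type-specific part of the STC: each
+ * MLX5_IFC_STC_ACTION_TYPE_* case writes the corresponding
+ * stc_ste_param_* layout into stc_param.
+ */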
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ void *stc_param)
+{
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+ MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+ MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_argument_id, stc_attr->modify_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+ MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, stc_param, decap,
+ stc_attr->remove_header.decap);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+ stc_attr->remove_header.start_anchor);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+ stc_attr->remove_header.end_anchor);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+ MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, stc_param, encap,
+ stc_attr->insert_header.encap);
+ MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+ stc_attr->insert_header.is_inline);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+ stc_attr->insert_header.insert_anchor);
+ /* HW expects the next two sizes in words, not bytes */
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+ stc_attr->insert_header.header_size / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+ stc_attr->insert_header.insert_offset / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+ stc_attr->insert_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_COPY:
+ case MLX5_IFC_STC_ACTION_TYPE_SET:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+ *(__be64 *)stc_param = stc_attr->modify_action.data;
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+ MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+ stc_attr->vport.vport_num);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+ stc_attr->vport.esw_owner_vhca_id);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+ stc_attr->vport.eswitch_owner_vhca_id_valid);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_DROP:
+ case MLX5_IFC_STC_ACTION_TYPE_NOP:
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ASO:
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+ stc_attr->aso.devx_obj_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+ stc_attr->aso.return_reg_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+ stc_attr->aso.aso_type);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+ stc_attr->ste_table.ste_obj_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+ stc_attr->ste_table.match_definer_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+ stc_attr->ste_table.log_hash_size);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+ MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+ stc_attr->remove_words.start_anchor);
+ MLX5_SET(stc_ste_param_remove_words, stc_param,
+ remove_size, stc_attr->remove_words.num_of_words);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+ MLX5_SET(stc_ste_param_trailer, stc_param, command,
+ stc_attr->reformat_trailer.op);
+ MLX5_SET(stc_ste_param_trailer, stc_param, type,
+ stc_attr->reformat_trailer.type);
+ MLX5_SET(stc_ste_param_trailer, stc_param, length,
+ stc_attr->reformat_trailer.size);
+ break;
+ default:
+ mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *stc_param;
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, obj_id, stc_id);
+ MLX5_SET(general_obj_in_cmd_hdr, attr,
+ op_param.query.obj_offset, stc_attr->stc_offset);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+ MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+ MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+ MLX5_SET64(stc, attr, modify_field_select,
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+ /* Set destination TIRN, TAG, FT ID, STE ID */
+ stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+ ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+ if (ret)
+ return ret;
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+ stc_attr->action_type);
+
+ return ret;
+}
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, arg);
+ MLX5_SET(arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ARG\n");
+ goto out;
+ }
+
+ *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
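+/* Create a modify-header pattern object. The pattern keeps only the
+ * control words of each action; except for copy and add_field actions,
+ * the data words are zeroed here and supplied separately through
+ * modify-header arguments.
+ */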
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ int num_of_actions;
+ u64 *pattern_data;
+ void *pattern;
+ void *attr;
+ int ret;
+ int i;
+
+ if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+ mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+ pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+ return -EINVAL;
+ }
+
+ attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+ pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+ /* The pattern_length field is expressed in double-dwords (2 * DW_SIZE bytes) */
+ MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+ pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+ memcpy(pattern_data, actions, pattern_length);
+
+ num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+ for (i = 0; i < num_of_actions; i++) {
+ int type;
+
+ type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+ if (type != MLX5_MODIFICATION_TYPE_COPY &&
+ type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+ /* Action typ-copy use all bytes for control */
+ MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+ goto out;
+ }
+
+ *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STE);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+ MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STE\n");
+ goto out;
+ }
+
+ *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+ void *ptr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+ MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+ MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+ MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+ MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+ MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+ MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+ MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+ MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+ MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+ MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+ MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+ MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+ MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+ MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+ MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+ MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+ MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+ MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+ ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+ memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create Definer\n");
+ goto out;
+ }
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int ret;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = kzalloc(insz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create packet reformat\n");
+ goto out;
+ }
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ int ret;
+
+ MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_in, in,
+ packet_reformat_id, reformat_id);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+ return ret;
+}
+
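+/* Move an SQ from RST to RDY state so it can start processing WQEs. */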
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ int ret;
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
+ void *key;
+ int ret;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
+ goto out;
+ }
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id)
+{
+ return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
+}
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe)
+{
+ u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+ u8 status;
+ void *ptr;
+ int ret;
+
+ MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+ MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+ memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+ memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+ memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+ if (attr->gta_data_1) {
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+ memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+ return ret;
+ }
+
+ status = MLX5_GET(generate_wqe_out, out, status);
+ if (status) {
+ mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+ return -EINVAL;
+ }
+
+ ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+ memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+ return ret;
+}
+
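+/* Query and cache every HCA capability section HWS relies on: general
+ * device caps (and caps 2), NIC flow table caps, WQE-based flow table
+ * caps and, on eswitch managers, eswitch/FDB caps.
+ */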
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ u32 out_size;
+ u32 *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps\n");
+ goto out;
+ }
+
+ caps->wqe_based_update =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+ caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.eswitch_manager);
+
+ caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_protocols);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+ caps->log_header_modify_argument_granularity =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+ caps->log_header_modify_argument_granularity -=
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+ caps->definer_format_sup =
+ MLX5_GET64(query_hca_cap_out, out,
+ capability.cmd_hca_cap.match_definer_format_supported);
+
+ caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.sq_ts_format);
+
+ caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.ipsec_offload);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps 2\n");
+ goto out;
+ }
+
+ caps->full_dw_jumbo_support =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+ caps->format_select_gtpu_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+ caps->format_select_gtpu_dw_1 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+ caps->format_select_gtpu_dw_2 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+ caps->format_select_gtpu_ext_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+ caps->supp_type_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.generate_wqe_type);
+
+ caps->flow_table_hash_type =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.flow_table_hash_type);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table caps\n");
+ goto out;
+ }
+
+ caps->nic_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->nic_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ caps->nic_ft.ignore_flow_level_rtc_valid =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ caps->flex_parser_ok_bits_supp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+ if (caps->wqe_based_update) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+ goto out;
+ }
+
+ caps->rtc_reparse_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+ caps->ste_format =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format);
+
+ caps->rtc_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+ caps->rtc_log_depth_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+ caps->ste_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+ caps->ste_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+ caps->trivial_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+ caps->stc_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+ caps->stc_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+ caps->rtc_hash_split_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+ caps->rtc_linear_lookup_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+ caps->access_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.access_index_mode);
+
+ caps->linear_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+ caps->rtc_max_hash_def_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+ caps->supp_ste_format_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+ caps->fdb_tir_stc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+ }
+
+ if (caps->eswitch_manager) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+ goto out;
+ }
+
+ caps->fdb_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->fdb_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+ goto out;
+ }
+
+ if (MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number_valid))
+ caps->eswitch_manager_vport_number =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
+ }
+
+ snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+ kfree(out);
+ return ret;
+}
+
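+/* A function's GVMI is its vhca_id, read from its general HCA caps;
+ * other_function selects querying another function's capabilities.
+ */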
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi)
+{
+ bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_function);
+ MLX5_SET(query_hca_cap_in, in, function_id,
+ mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
new file mode 100644
index 000000000000..2fbcf4ff571a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CMD_H_
+#define MLX5HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+ MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+ u8 destination_type;
+ u32 destination_id;
+ enum mlx5hws_cmd_ext_dest_flags ext_flags;
+ u32 ext_reformat_id;
+ u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+ u32 action_flags;
+ bool ignore_flow_level;
+ u8 flow_source;
+ u8 extended_dest;
+ u8 encrypt_decrypt_type;
+ u32 encrypt_decrypt_obj_id;
+ u32 packet_reformat_id;
+ u32 dests_num;
+ struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+ u8 type;
+ u32 rtc_id_0;
+ u32 rtc_id_1;
+ u32 table_miss_id;
+ u8 table_miss_action;
+ u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+ u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+ u32 table_id;
+ u32 table_type;
+};
+
+struct mlx5hws_cmd_forward_tbl {
+ u8 type;
+ u32 ft_id;
+ u32 fg_id;
+ u32 refcount;
+};
+
+struct mlx5hws_cmd_rtc_create_attr {
+ u32 pd;
+ u32 stc_base;
+ u32 ste_base;
+ u32 ste_offset;
+ u32 miss_ft_id;
+ bool fw_gen_wqe;
+ u8 update_index_mode;
+ u8 access_index_mode;
+ u8 num_hash_definer;
+ u8 log_depth;
+ u8 log_size;
+ u8 table_type;
+ u8 match_definer_0;
+ u8 match_definer_1;
+ u8 reparse_mode;
+ bool is_frst_jumbo;
+ bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_stc_modify_attr {
+ u32 stc_offset;
+ u8 action_offset;
+ u8 reparse_mode;
+ enum mlx5_ifc_stc_action_type action_type;
+ union {
+ u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+ struct {
+ u8 decap;
+ u16 start_anchor;
+ u16 end_anchor;
+ } remove_header;
+ struct {
+ u32 arg_id;
+ u32 pattern_id;
+ } modify_header;
+ struct {
+ __be64 data;
+ } modify_action;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u8 is_inline;
+ u8 encap;
+ u16 insert_anchor;
+ u16 insert_offset;
+ } insert_header;
+ struct {
+ u8 aso_type;
+ u32 devx_obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u8 eswitch_owner_vhca_id_valid;
+ } vport;
+ struct {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 ste_obj_id; /* Internal */
+ u32 match_definer_id;
+ u8 log_hash_size;
+ bool ignore_tx;
+ } ste_table;
+ struct {
+ u16 start_anchor;
+ u16 num_of_words;
+ } remove_words;
+ struct {
+ u8 type;
+ u8 op;
+ u8 size;
+ } reformat_trailer;
+
+ u32 dest_table_id;
+ u32 dest_tir_num;
+ };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+ u8 *dw_selector;
+ u8 *byte_selector;
+ u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+ u8 type;
+ size_t data_sz;
+ void *data;
+ u8 reformat_param_0;
+};
+
+struct mlx5hws_cmd_query_ft_caps {
+ u8 max_level;
+ u8 reparse;
+ u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+ u8 *wqe_ctrl;
+ u8 *gta_ctrl;
+ u8 *gta_data_0;
+ u8 *gta_data_1;
+ u32 pdn;
+};
+
+struct mlx5hws_cmd_query_caps {
+ u32 flex_protocols;
+ u8 wqe_based_update;
+ u8 rtc_reparse_mode;
+ u16 ste_format;
+ u8 rtc_index_mode;
+ u8 ste_alloc_log_max;
+ u8 ste_alloc_log_gran;
+ u8 stc_alloc_log_max;
+ u8 stc_alloc_log_gran;
+ u8 rtc_log_depth_max;
+ u8 format_select_gtpu_dw_0;
+ u8 format_select_gtpu_dw_1;
+ u8 flow_table_hash_type;
+ u8 format_select_gtpu_dw_2;
+ u8 format_select_gtpu_ext_dw_0;
+ u8 access_index_mode;
+ u32 linear_match_definer;
+ bool full_dw_jumbo_support;
+ bool rtc_hash_split_table;
+ bool rtc_linear_lookup_table;
+ u32 supp_type_gen_wqe;
+ u8 rtc_max_hash_def_gen_wqe;
+ u16 supp_ste_format_gen_wqe;
+ struct mlx5hws_cmd_query_ft_caps nic_ft;
+ struct mlx5hws_cmd_query_ft_caps fdb_ft;
+ bool eswitch_manager;
+ bool merged_eswitch;
+ u32 eswitch_manager_vport_number;
+ u8 log_header_modify_argument_granularity;
+ u8 log_header_modify_argument_max_alloc;
+ u8 sq_ts_format;
+ u8 fdb_tir_stc;
+ u64 definer_format_sup;
+ u32 trivial_match_definer;
+ u32 vhca_id;
+ u32 shared_vhca_id;
+ char fw_ver[64];
+ bool ipsec_offload;
+ bool is_ecpf;
+ u8 flex_parser_ok_bits_supp;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id);
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi);
+
+#endif /* MLX5HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
new file mode 100644
index 000000000000..00e4fdf4a558
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+ return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+ /* Prefer dynamic reparse, which reparses only after specific actions */
+ if (mlx5hws_context_cap_dynamic_reparse(ctx))
+ return MLX5_IFC_RTC_REPARSE_NEVER;
+
+ /* Otherwise fall back to the less efficient static reparse */
+ return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
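+/* Initialize the context-level caches and pools: the modify-header
+ * pattern cache, the definer cache and one STC pool per table type.
+ */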
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool_attr pool_attr = {0};
+ u8 max_log_sz;
+ int ret;
+ int i;
+
+ ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+ if (ret)
+ goto uninit_pat_cache;
+
+ /* Create an STC pool per FT type */
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
+ max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+ pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ pool_attr.table_type = i;
+ ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool[i]) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
+ ret = -ENOMEM;
+ goto free_stc_pools;
+ }
+ }
+
+ return 0;
+
+free_stc_pools:
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+ return ret;
+}
+
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+ }
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+ int ret = 0;
+
+ ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate PD\n");
+ return ret;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+ return 0;
+}
+
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+ if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+ mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+ return 0;
+}
+
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ /* HWS not supported on device / FW */
+ if (!caps->wqe_based_update) {
+ mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+ return;
+ }
+
+ if (!caps->eswitch_manager) {
+ mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+ return;
+ }
+
+ /* Current solution requires all rules to set reparse bit */
+ if (!caps->nic_ft.reparse ||
+ (caps->eswitch_manager && !caps->fdb_ft.reparse) ||
+ !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+ mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+ return;
+ }
+
+ /* FW/HW must support 8DW STE */
+ if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+ mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+ return;
+ }
+
+ /* Both by-hash and by-offset rule insertion modes are required */
+ if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+ !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+ mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+ return;
+ }
+
+ /* Support for SELECT definer ID is required */
+ if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+ mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+ return;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+ struct mlx5hws_context_attr *attr)
+{
+ int ret;
+
+ hws_context_check_hws_supp(ctx);
+
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return 0;
+
+ ret = hws_context_init_pd(ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_context_pools_init(ctx);
+ if (ret)
+ goto uninit_pd;
+
+ if (attr->bwc)
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+ ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+ if (ret)
+ goto pools_uninit;
+
+ INIT_LIST_HEAD(&ctx->tbl_list);
+
+ return 0;
+
+pools_uninit:
+ hws_context_pools_uninit(ctx);
+uninit_pd:
+ hws_context_uninit_pd(ctx);
+ return ret;
+}
+
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return;
+
+ mlx5hws_send_queues_close(ctx);
+ hws_context_pools_uninit(ctx);
+ hws_context_uninit_pd(ctx);
+}
+
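+/*
+ * Minimal usage sketch (illustrative; only the attr fields referenced in
+ * this file - queues, queue_size and bwc - are assumed):
+ *
+ *	struct mlx5hws_context_attr attr = { .queues = 1, .queue_size = 256 };
+ *	struct mlx5hws_context *ctx = mlx5hws_context_open(mdev, &attr);
+ *
+ *	if (ctx)
+ *		mlx5hws_context_close(ctx);
+ */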
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr)
+{
+ struct mlx5hws_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->mdev = mdev;
+
+ mutex_init(&ctx->ctrl_lock);
+ xa_init(&ctx->peer_ctx_xa);
+
+ ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+ if (!ctx->caps)
+ goto free_ctx;
+
+ ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+ if (ret)
+ goto free_caps;
+
+ ret = mlx5hws_vport_init_vports(ctx);
+ if (ret)
+ goto free_caps;
+
+ ret = hws_context_init_hws(ctx, attr);
+ if (ret)
+ goto uninit_vports;
+
+ mlx5hws_debug_init_dump(ctx);
+
+ return ctx;
+
+uninit_vports:
+ mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+ kfree(ctx->caps);
+free_ctx:
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return NULL;
+}
+
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+ mlx5hws_debug_uninit_dump(ctx);
+ hws_context_uninit_hws(ctx);
+ mlx5hws_vport_uninit_vports(ctx);
+ kfree(ctx->caps);
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return 0;
+}
+
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+ pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
new file mode 100644
index 000000000000..e5a7ce604334
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CONTEXT_H_
+#define MLX5HWS_CONTEXT_H_
+
+enum mlx5hws_context_flags {
+ MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+ MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+ MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+};
+
+enum mlx5hws_context_shared_stc_type {
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+ MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+ u16 esw_manager_gvmi;
+ u16 uplink_gvmi;
+ struct xarray vport_gvmi_xa;
+};
+
+struct mlx5hws_context {
+ struct mlx5_core_dev *mdev;
+ struct mlx5hws_cmd_query_caps *caps;
+ u32 pd_num;
+ struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pattern_cache *pattern_cache;
+ struct mlx5hws_definer_cache *definer_cache;
+ struct mutex ctrl_lock; /* control lock to protect the whole context */
+ enum mlx5hws_context_flags flags;
+ struct mlx5hws_send_engine *send_queue;
+ size_t queues;
+ struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+ struct list_head tbl_list;
+ struct mlx5hws_context_debug_info debug_info;
+ struct xarray peer_ctx_xa;
+ struct mlx5hws_context_vports vports;
+};
+
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
new file mode 100644
index 000000000000..2b8c5a4e1c4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "mlx5hws_internal.h"
+
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+ void *parent_obj,
+ struct mlx5hws_definer *definer,
+ enum mlx5hws_debug_res_type type)
+{
+ int i;
+
+ if (!definer)
+ return 0;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+ type,
+ HWS_PTR_TO_ID(definer),
+ HWS_PTR_TO_ID(parent_obj),
+ definer->obj_id,
+ definer->type);
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->dw_selector[i],
+ (i == DW_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->byte_selector[i],
+ (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+ seq_puts(f, "\n");
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_debug_res_type type;
+ int i, ret;
+
+ for (i = 0; i < matcher->num_of_mt; i++) {
+ struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+ HWS_PTR_TO_ID(mt),
+ HWS_PTR_TO_ID(matcher),
+ mt->fc_sz,
+ 0, 0);
+
+ type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+ ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_action_type action_type;
+ int i, j;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ HWS_PTR_TO_ID(at),
+ HWS_PTR_TO_ID(matcher),
+ at->only_term,
+ at->num_of_action_stes,
+ at->num_actions);
+
+ for (j = 0; j < at->num_actions; j++) {
+ action_type = at->action_type_arr[j];
+ seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+ }
+
+ seq_puts(f, "\n");
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+ HWS_PTR_TO_ID(matcher),
+ attr->priority,
+ attr->mode,
+ attr->table.sz_row_log,
+ attr->table.sz_col_log,
+ attr->optimize_using_rule_idx,
+ attr->optimize_flow_src,
+ attr->insert_mode,
+ attr->distribute_mode);
+
+ return 0;
+}
+
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ u32 ste_0_id = -1;
+ u32 ste_1_id = -1;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+ HWS_PTR_TO_ID(matcher),
+ HWS_PTR_TO_ID(matcher->tbl),
+ matcher->num_of_mt,
+ matcher->end_ft_id,
+ matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+ ste = &matcher->match_ste.ste;
+ ste_pool = matcher->match_ste.pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ }
+
+ seq_printf(f, ",%d,%d,%d,%d",
+ matcher->match_ste.rtc_0_id,
+ (int)ste_0_id,
+ matcher->match_ste.rtc_1_id,
+ (int)ste_1_id);
+
+ ste = &matcher->action_ste[0].ste;
+ ste_pool = matcher->action_ste[0].pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ else
+ ste_1_id = -1;
+ } else {
+ ste_0_id = -1;
+ ste_1_id = -1;
+ }
+
+ ft_attr.type = matcher->tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+ matcher->end_ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
+ matcher->action_ste[0].rtc_0_id,
+ (int)ste_0_id,
+ matcher->action_ste[0].rtc_1_id,
+ (int)ste_1_id,
+ 0,
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+ ret = hws_debug_dump_matcher_attr(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_match_template(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_action_template(f, matcher);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_matcher *matcher;
+ u64 local_icm_addr_0 = 0;
+ u64 local_icm_addr_1 = 0;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_TABLE,
+ HWS_PTR_TO_ID(tbl),
+ HWS_PTR_TO_ID(tbl->ctx),
+ tbl->ft_id,
+ MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+ tbl->fw_ft_type,
+ tbl->level,
+ 0);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+ tbl->ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+ HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+ list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+ ret = hws_debug_dump_matcher(f, matcher);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_send_engine *send_queue;
+ struct mlx5hws_send_ring *send_ring;
+ struct mlx5hws_send_ring_cq *cq;
+ struct mlx5hws_send_ring_sq *sq;
+ int i;
+
+ for (i = 0; i < (int)ctx->queues; i++) {
+ send_queue = &ctx->send_queue[i];
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+ HWS_PTR_TO_ID(ctx),
+ i,
+ send_queue->used_entries,
+ send_queue->num_entries,
+ 1, /* one send ring per queue */
+ send_queue->num_entries,
+ send_queue->err,
+ send_queue->completed.ci,
+ send_queue->completed.pi,
+ send_queue->completed.mask);
+
+ send_ring = &send_queue->send_ring;
+ cq = &send_ring->send_cq;
+ sq = &send_ring->send_sq;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+ HWS_PTR_TO_ID(ctx),
+ 0, /* one send ring per send queue */
+ i,
+ cq->mcq.cqn,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ cq->mcq.cqe_sz,
+ sq->sqn,
+ 0,
+ 0,
+ 0);
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+ HWS_PTR_TO_ID(ctx),
+ caps->fw_ver,
+ caps->wqe_based_update,
+ caps->ste_format,
+ caps->ste_alloc_log_max,
+ caps->log_header_modify_argument_max_alloc);
+
+ seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+ caps->flex_protocols,
+ caps->rtc_reparse_mode,
+ caps->rtc_index_mode,
+ caps->ste_alloc_log_gran,
+ caps->stc_alloc_log_max,
+ caps->stc_alloc_log_gran,
+ caps->rtc_log_depth_max,
+ caps->format_select_gtpu_dw_0,
+ caps->format_select_gtpu_dw_1,
+ caps->format_select_gtpu_dw_2,
+ caps->format_select_gtpu_ext_dw_0,
+ caps->nic_ft.max_level,
+ caps->nic_ft.reparse,
+ caps->fdb_ft.max_level,
+ caps->fdb_ft.reparse,
+ caps->log_header_modify_argument_granularity,
+ caps->linear_match_definer,
+ "regc_3");
+
+ return 0;
+}
+
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+ HWS_PTR_TO_ID(ctx),
+ ctx->pd_num,
+ ctx->queues,
+ ctx->send_queue->num_entries,
+ "None", /* no shared gvmi */
+ ctx->caps->vhca_id,
+ 0xffff); /* no shared gvmi */
+
+ return 0;
+}
+
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+ HWS_PTR_TO_ID(ctx),
+ ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+ pci_name(dev->pdev),
+ HWS_DEBUG_FORMAT_VERSION,
+ LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
+
+ ret = hws_debug_dump_context_attr(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_caps(f, ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+ struct mlx5hws_context *ctx,
+ u32 tbl_type,
+ struct mlx5hws_pool_resource *resource)
+{
+ seq_printf(f, "%d,0x%llx,%u,%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+ HWS_PTR_TO_ID(ctx),
+ tbl_type,
+ resource->base_id);
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool *stc_pool;
+ u32 table_type;
+ int ret;
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ stc_pool = ctx->stc_pool[i];
+ table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+
+ if (!stc_pool)
+ continue;
+
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ ret = hws_debug_dump_context_info(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_send_engine(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_stc(f, ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+ ret = hws_debug_dump_table(f, tbl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!f || !ctx)
+ return -EINVAL;
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = hws_debug_dump_context(f, ctx);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+ return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
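+
+/*
+ * DEFINE_SHOW_ATTRIBUTE(hws_dump) is the <linux/seq_file.h> helper that
+ * generates the hws_dump_open() wrapper around single_open() plus the
+ * hws_dump_fops referenced below, wiring the debugfs file's private data
+ * (the context) into hws_dump_show().
+ */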
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ char file_name[128];
+
+ ctx->debug_info.steering_debugfs =
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+ ctx->debug_info.fdb_debugfs =
+ debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+ sprintf(file_name, "ctx_%p", ctx);
+ debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+ ctx, &hws_dump_fops);
+}
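+
+/*
+ * With the usual mlx5 debugfs layout this creates something like
+ * /sys/kernel/debug/mlx5/<pci-addr>/steering/fdb/ctx_<ptr> (illustrative
+ * path); each read of that file runs one hws_debug_dump() pass over the
+ * whole context under ctrl_lock.
+ */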
+
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+ debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
new file mode 100644
index 000000000000..b93a536035d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEBUG_H_
+#define MLX5HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
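+
+/*
+ * Example (illustrative values): HWS_PTR_TO_ID() keeps only the low 32
+ * pointer bits, so an object allocated at 0xffff888812345678 shows up in
+ * the dump as ID 0x12345678; it is a correlation key for dump lines, not a
+ * dereferenceable address.
+ */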
+
+enum mlx5hws_debug_res_type {
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+ MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+};
+
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
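+
+/*
+ * Example (illustrative value): an ICM address of 0x1000 becomes
+ * (0x1000 >> 6) & 0xffffffff = 0x40, i.e. the dump reports 64-byte-granular
+ * ICM indices rather than raw ICM addresses.
+ */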
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
new file mode 100644
index 000000000000..3bdb5c90efff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+/* Pattern tunnel layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
+#define MLX5_FLOW_LAYER_GRE BIT(14)
+#define MLX5_FLOW_LAYER_MPLS BIT(15)
+
+/* Pattern tunnel layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC 0x85
+#define BAD_PORT 0xBAD
+#define ETH_TYPE_IPV4_VXLAN 0x0800
+#define ETH_TYPE_IPV6_VXLAN 0x86DD
+#define UDP_GTPU_PORT 2152
+#define UDP_PORT_MPLS 6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT 4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN 0x0
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_ICMP 0x3
+#define STE_ESP 0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for 32bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + ((byte_off) / 4)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+ ((byte_off) / 4))) & \
+ (~((mask) << (bit_off)))) | \
+ (((_v) & (mask)) << \
+ (bit_off))); \
+ } while (0)
+
+/* Setter function based on bit offset and mask, for unaligned 32bit DW */
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ if (unlikely((bit_off) < 0)) { \
+ u32 _bit_off = -1 * (bit_off); \
+ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+ (bit_off) % BITS_IN_DW, second_dw_mask); \
+ } else { \
+ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ } \
+ } while (0)
+
+/* Getter for up to aligned 32bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+ ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
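+
+/*
+ * Worked example (illustrative values): with byte_off = 4, bit_off = 8 and
+ * mask = 0xff, HWS_SET32(p, 0xab, 4, 8, 0xff) read-modify-writes the second
+ * big-endian DW of p, clearing bits 15:8 and storing 0xab there, and
+ * HWS_GET32(p, 4, 8, 0xff) then reads back 0xab. A negative bit_off takes
+ * the first branch of HWS_SET32, which splits the value across two DWs.
+ */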
+
+#define HWS_CALC_FNAME(field, inner) \
+ ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+ MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+ (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+ BUILD_BUG_ON((sz_in_bits) % 32); \
+ u32 sz = sz_in_bits; \
+ u32 res = 0; \
+ u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+ while (!res && sz >= 32) { \
+ res = *((match_param) + (dw_off++)); \
+ sz -= 32; \
+ } \
+ res; \
+ })
+
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+ (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+ !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
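+
+/*
+ * Example: the reserved-field checks below call HWS_IS_FLD_SET_SZ() with
+ * sizes such as 0x2 or 0xe, which take the plain MLX5_GET path, while a
+ * size like 0x60 goes through HWS_IS_FLD_SET_DW_ARR() and scans the field
+ * 32 bits at a time.
+ */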
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+ (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+ do { \
+ (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+ (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+ (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+ do { \
+ (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+ (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+ (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+ do { \
+ HWS_CALC_HDR_SRC(fc, s_hdr); \
+ HWS_CALC_HDR_DST(fc, d_hdr); \
+ (fc)->tag_set = &hws_definer_generic_set; \
+ } while (0)
+
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+ do { \
+ if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+ HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+ } while (0)
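+
+/*
+ * Expansion sketch: HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ * outer_headers.ethertype, eth_l2_outer.l3_ethertype) only touches
+ * fc[MLX5HWS_DEFINER_FNAME_ETH_TYPE_O] when the ethertype bits are set in
+ * the match param, recording the fte_match_param source offsets, the
+ * definer_hl destination offsets and the generic tag_set callback.
+ */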
+
+struct mlx5hws_definer_sel_ctrl {
+ u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+ u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
+ u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
+ u8 used_full_dw;
+ u8 used_lim_dw;
+ u8 used_bytes;
+ u8 full_dw_selector[DW_SELECTORS];
+ u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+ u8 byte_selector[BYTE_SELECTORS];
+};
+
+struct mlx5hws_definer_conv_data {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_definer_fc *fc;
+ /* enum mlx5hws_definer_match_flag */
+ u32 match_flags;
+};
+
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ /* Can be optimized */
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
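+
+/*
+ * These are the two baseline callbacks: hws_definer_generic_set() copies
+ * the masked source field from the match param into the rule tag, while
+ * hws_definer_ones_set() is installed as tag_mask_set for computed
+ * qualifiers (e.g. VLAN type, L3 type) whose mask must always be fully set.
+ */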
+
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ bool inner)
+{
+ u32 second_cvlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+ u32 second_svlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+ if (second_cvlan_tag)
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (second_svlan_tag)
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
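+
+/*
+ * Resulting DW sketch (assuming header_icmp places type at bits 31:24 and
+ * code at bits 23:16): an ICMP echo request, type 8 / code 0, yields
+ * dw = 0x08000000, which is then matched against tcp_icmp.icmp_dw1.
+ */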
+
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ if (val == IPV4)
+ HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (val == IPV6)
+ HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ struct mlx5hws_context *peer_ctx)
+{
+ u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+ u16 vport_gvmi = 0;
+ int ret;
+
+ ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+ if (ret) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+ return;
+ }
+
+ if (vport_gvmi)
+ HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+ int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+ struct mlx5hws_context *peer_ctx;
+
+ if (id == fc->ctx->caps->vhca_id)
+ peer_ctx = fc->ctx;
+ else
+ peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+ if (!peer_ctx) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+ return;
+ }
+
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
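+
+/*
+ * Flow sketch: when source_eswitch_owner_vhca_id is present in the match,
+ * hws_definer_set_source_gvmi_vhca_id() first resolves the owning context
+ * (itself, or a peer registered via mlx5hws_context_set_peer() in
+ * peer_ctx_xa) and only then translates vport to gvmi; otherwise the local
+ * context is used directly, and a failed lookup tags BAD_PORT (0xBAD).
+ */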
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+ bool *parser_is_used,
+ u32 id,
+ u32 value)
+{
+ if (id || value) {
+ if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+ mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+ return NULL;
+ }
+
+ if (parser_is_used[id]) {
+ mlx5hws_err(cd->ctx, "Parser id have been used\n");
+ return NULL;
+ }
+ }
+
+ parser_is_used[id] = true;
+
+ return hws_definer_flex_parser_handler(cd, id);
+}
+
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+ u32 flags;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
+
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ return 0;
+
+err_conflict:
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ return -EINVAL;
+}
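+
+/*
+ * Note on the checks above: "flags & (flags - 1)" is the standard
+ * more-than-one-bit-set test, e.g. 0b0101 & 0b0100 != 0, so combining two
+ * mutually exclusive tunnel or L4 match types in one definer is rejected.
+ */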
+
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ outer_headers.ethertype,
+ eth_l2_outer.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+ outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+ outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+ outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+ outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+ outer_headers.first_prio, eth_l2_outer.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+ outer_headers.first_cfi, eth_l2_outer.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+ outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ outer_headers.ip_protocol,
+ eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ outer_headers.ttl_hoplimit,
+ eth_l3_outer.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Assume IPv6 if any of the three upper IPv6 DWs is set; the lowest DW is shared with IPv4 */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.tcp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.tcp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.udp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.udp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+ outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+ outer_headers.ip_dscp, eth_l3_outer.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_O,
+ outer_headers.ip_ecn, eth_l3_outer.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+ outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+ smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l4_outer.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ inner_headers.ethertype,
+ eth_l2_inner.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+ inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+ inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+ inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+ inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+ inner_headers.first_prio, eth_l2_inner.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+ inner_headers.first_cfi, eth_l2_inner.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+ inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+ inner_headers.ip_protocol,
+ eth_l3_inner.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+ inner_headers.ip_version,
+ eth_l3_inner.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_I,
+ inner_headers.ttl_hoplimit,
+ eth_l3_inner.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Assume IPv6 if any of the three upper IPv6 DWs is set; the lowest DW is shared with IPv4 */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.tcp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.tcp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.udp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.udp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+ inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+ inner_headers.ip_dscp, eth_l3_inner.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_I,
+ inner_headers.ip_ecn, eth_l3_inner.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+ inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_inner.ip_fragmented);
+ } else {
+ smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l4_inner.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+ }
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+ return -EINVAL;
+ }
+
+ /* Check GRE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_c_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_k_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_s_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+ HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+ misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+ }
+
+ /* Check GENEVE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_opt_len,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_protocol_type,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_oam,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+ }
+
+ HWS_SET_HDR(fc, match_param, SOURCE_QP,
+ misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+ misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+ misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+ /* L2 Second VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+ misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+ misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+ misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+ misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+ misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+ misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+ /* L2 Second CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* VXLAN VNI */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+ }
+
+ /* Flex protocol steering ok bits */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ if (!caps->flex_parser_ok_bits_supp) {
+ mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+ cd, caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters.geneve_tlv_option_0_exist);
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+ HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+ misc_parameters.source_eswitch_owner_vhca_id) ?
+ &hws_definer_set_source_gvmi_vhca_id :
+ &hws_definer_set_source_gvmi;
+ } else {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported source_eswitch_owner_vhca_id field usage\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
+ return -EINVAL;
+ }
+
+ HWS_SET_HDR(fc, match_param, MPLS0_O,
+ misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+ HWS_SET_HDR(fc, match_param, MPLS0_I,
+ misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+ HWS_SET_HDR(fc, match_param, REG_0,
+ misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+ HWS_SET_HDR(fc, match_param, REG_1,
+ misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+ HWS_SET_HDR(fc, match_param, REG_2,
+ misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+ HWS_SET_HDR(fc, match_param, REG_3,
+ misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+ HWS_SET_HDR(fc, match_param, REG_4,
+ misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+ HWS_SET_HDR(fc, match_param, REG_5,
+ misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+ HWS_SET_HDR(fc, match_param, REG_6,
+ misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+ HWS_SET_HDR(fc, match_param, REG_7,
+ misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+ HWS_SET_HDR(fc, match_param, REG_A,
+ misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_gre);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_udp);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ bool vxlan_gpe_flex_parser_enabled;
+
+ /* Check reserved and unsupported fields */
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ curr_fc =
+ hws_definer_flex_parser_handler(cd,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.geneve_tlv_option_0_data);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, teid);
+ fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
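+ /* Note: msg_flags reuses the GTP_MSG_TYPE field slot; both fields
+ * live in the same GTPU DW selected by format_select_gtpu_dw_0, so
+ * they cannot be matched simultaneously.
+ */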
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_2);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_first_ext_dw_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_0);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+ struct mlx5hws_definer_fc *fc;
+ u32 id, value;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+ return -EINVAL;
+ }
+
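+ /* Each of the four programmable sample fields is routed to a flex
+ * parser field by hws_definer_misc4_fields_handler(); parser_is_used
+ * tracks which parser slots were already claimed so the same parser
+ * is not mapped twice.
+ */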
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_definer_fc *fc = cd->fc;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+ misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+ misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+ misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+ }
+
+ HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+ misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+ return 0;
+}
+
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+ u32 fc_sz = 0;
+ int i;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (fc == ZERO_SIZE_PTR)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
+ if (fc[i].tag_set)
+ fc_sz++;
+ return fc_sz;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ u32 definer_size = hws_definer_get_fc_size(fc);
+ u32 fc_sz = 0;
+ int i;
+
+ compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
+ if (!compressed_fc)
+ return NULL;
+
+ /* For an empty matcher, kcalloc(0, ...) above returned ZERO_SIZE_PTR */
+ if (!definer_size)
+ return compressed_fc;
+
+ for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ fc[i].fname = i;
+ memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
+ }
+
+ return compressed_fc;
+}
+
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+ int i;
+
+ /* nothing to do for empty matcher */
+ if (fc == ZERO_SIZE_PTR)
+ return;
+
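+ /* Mark every bit consumed by a field copy in the header layout;
+ * the layout is later scanned DW by DW to choose definer selectors.
+ */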
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+ }
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+ size_t len)
+{
+ struct mlx5hws_definer_fc *fc;
+ int i;
+
+ fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ fc[i].ctx = ctx;
+
+ return fc;
+}
+
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ u8 *hl)
+{
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return -ENOMEM;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+ mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Check that no conflicting fields are set together */
+ ret = hws_definer_check_match_flags(&cd);
+ if (ret)
+ goto err_free_fc;
+
+ /* Allocate fc array on mt */
+ mt->fc = hws_definer_alloc_compressed_fc(fc);
+ if (!mt->fc) {
+ mlx5hws_err(ctx,
+ "Convert match params: failed to set field copy to match template\n");
+ ret = -ENOMEM;
+ goto err_free_fc;
+ }
+ mt->fc_sz = hws_definer_get_fc_size(fc);
+
+ /* Fill in headers layout */
+ hws_definer_set_hl(hl, fc);
+
+ kfree(fc);
+ return 0;
+
+err_free_fc:
+ kfree(fc);
+ return ret;
+}
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return NULL;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Allocate the compressed fc array */
+ compressed_fc = hws_definer_alloc_compressed_fc(fc);
+ if (!compressed_fc) {
+ mlx5hws_err(ctx,
+ "Convert to compressed fc: failed to set field copy to match template\n");
+ goto err_free_fc;
+ }
+ *fc_sz = hws_definer_get_fc_size(fc);
+
+err_free_fc:
+ kfree(fc);
+ return compressed_fc;
+}
+
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+ u32 hl_byte_off,
+ u32 *tag_byte_off)
+{
+ int i, dw_to_scan;
+ u8 byte_offset;
+
+ /* Avoid accessing unused DW selectors */
+ dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
+ DW_SELECTORS : DW_SELECTORS_MATCH;
+
+ /* Add offset since each DW covers multiple BYTEs */
+ byte_offset = hl_byte_off % DW_SIZE;
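+ /* DW selectors fill the tag from its end backwards, so selector i
+ * corresponds to tag DW (DW_SELECTORS - i - 1).
+ */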
+ for (i = 0; i < dw_to_scan; i++) {
+ if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+ *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ /* Add offset to skip DWs in definer */
+ byte_offset = DW_SIZE * DW_SELECTORS;
+ /* Iterate in reverse since byte selectors are allocated from 7 down to 0 */
+ for (i = BYTE_SELECTORS; i-- > 0 ;) {
+ if (definer->byte_selector[i] == hl_byte_off) {
+ *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz)
+{
+ u32 tag_offset = 0;
+ int ret, byte_diff;
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ /* Map header layout byte offset to byte offset in tag */
+ ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
+ if (ret)
+ return ret;
+
+ /* Move setter based on the location in the definer */
+ byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
+ fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
+
+ /* Update offset in headers layout to offset in tag */
+ fc->byte_off = tag_offset;
+ fc++;
+ }
+
+ return 0;
+}
+
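+/* Recursive backtracking selector search: for each non-zero DW of the header
+ * layout try, in order, a limited DW selector, a full DW selector and
+ * finally per-byte selectors, undoing each choice when the remainder of the
+ * layout cannot be covered.
+ */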
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+ u32 cur_dw,
+ u32 *data)
+{
+ u8 bytes_set;
+ int byte_idx;
+ bool ret;
+ int i;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+
+ /* No data set, can skip to next DW */
+ while (!*data) {
+ cur_dw++;
+ data++;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+ }
+
+ /* Used all DW selectors and Byte selectors, no possible solution */
+ if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+ ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+ ctrl->allowed_bytes == ctrl->used_bytes)
+ return false;
+
+ /* Try to use limited DW selectors */
+ if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+ ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+ }
+
+ /* Try to use DW selectors */
+ if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+ ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+ }
+
+ /* No byte selector for offset bigger than 255 */
+ if (cur_dw * DW_SIZE > 255)
+ return false;
+
+ bytes_set = !!(0x000000ff & *data) +
+ !!(0x0000ff00 & *data) +
+ !!(0x00ff0000 & *data) +
+ !!(0xff000000 & *data);
+
+ /* Check if there are enough byte selectors left */
+ if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+ return false;
+
+ /* Try to use Byte selectors */
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ /* Use byte selectors high to low */
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+ ctrl->used_bytes++;
+ }
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ ctrl->used_bytes--;
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = 0;
+ }
+
+ return false;
+}
+
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+ struct mlx5hws_definer *definer)
+{
+ memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+ memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+ memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+ ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u8 *hl)
+{
+ struct mlx5hws_definer_sel_ctrl ctrl = {0};
+ bool found;
+
+ /* Try to create a match definer */
+ ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = 0;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+ return 0;
+ }
+
+ /* Try to create a full/limited jumbo definer */
+ ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+ DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+ DW_SELECTORS_LIMITED;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+ return 0;
+ }
+
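+ /* Positive E2BIG is used deliberately as an internal "does not fit"
+ * signal; the caller logs it at debug level rather than as an error.
+ */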
+ return E2BIG;
+}
+
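+/* Build the tag mask: fields with a dedicated tag_mask_set() callback use
+ * it, all others fall back to their regular tag_set().
+ */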
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ if (fc->tag_mask_set)
+ fc->tag_mask_set(fc, match_param, tag);
+ else
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+ return definer->obj_id;
+}
+
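+/* Returns 0 only when both definers use identical selectors and mask tags,
+ * i.e. when a cached definer object can be reused.
+ */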
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b)
+{
+ int i;
+
+ /* Future: Optimize by comparing selectors with valid mask only */
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+ return 1;
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+ return 1;
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+ return 1;
+
+ return 0;
+}
+
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer)
+{
+ u8 *match_hl;
+ int ret;
+
+ /* A union header layout (hl) is used to build a single definer field
+ * layout that can be reused with different bitmasks for hash and match.
+ */
+ match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+ if (!match_hl)
+ return -ENOMEM;
+
+ /* Convert all mt items to header layout (hl)
+ * and allocate the match field copy array (fc).
+ */
+ ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+ goto free_fc;
+ }
+
+ /* Find the match definer layout for header layout match union */
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ if (ret) {
+ if (ret == E2BIG)
+ mlx5hws_dbg(ctx,
+ "Failed to create match definer from header layout - E2BIG\n");
+ else
+ mlx5hws_err(ctx,
+ "Failed to create match definer from header layout (%d)\n",
+ ret);
+ goto free_fc;
+ }
+
+ kfree(match_hl);
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+
+ kfree(match_hl);
+ return ret;
+}
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+ struct mlx5hws_definer_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->list_head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+ kfree(cache);
+}
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+ struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+ struct mlx5hws_definer_cache_item *cached_definer;
+ u32 obj_id;
+ int ret;
+
+ /* Search definer cache for requested definer */
+ list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+ if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and set LRU (move to be first in the list) */
+ list_del_init(&cached_definer->list_node);
+ list_add(&cached_definer->list_node, &cache->list_head);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj_id;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+ if (ret)
+ return -1;
+
+ cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+ if (!cached_definer)
+ goto free_definer_obj;
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj_id = obj_id;
+ cached_definer->refcount = 1;
+ list_add(&cached_definer->list_node, &cache->list_head);
+
+ return obj_id;
+
+free_definer_obj:
+ mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+ return -1;
+}
+
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+ struct mlx5hws_definer_cache_item *cached_definer;
+
+ list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+ if (cached_definer->definer.obj_id != obj_id)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ list_del_init(&cached_definer->list_node);
+ mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+ kfree(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ pr_warn("HWS: failed putting definer object\n");
+}
+
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 *match_param,
+ struct mlx5hws_definer *layout,
+ bool bind_fc)
+{
+ struct mlx5hws_definer *definer;
+ int ret;
+
+ definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ /* Align field copy array based on given layout */
+ if (bind_fc) {
+ ret = hws_definer_fc_bind(definer, fc, fc_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+ goto free_definer;
+ }
+ }
+
+ /* Create the tag mask used for definer creation */
+ hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0)
+ goto free_definer;
+
+ definer->obj_id = ret;
+ return definer;
+
+free_definer:
+ kfree(definer);
+ return NULL;
+}
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ hws_definer_put_obj(ctx, definer->obj_id);
+ kfree(definer);
+}
+
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_layout)
+{
+ /* Create mandatory match definer */
+ mt->definer = hws_definer_alloc(ctx,
+ mt->fc,
+ mt->fc_sz,
+ mt->match_param,
+ match_layout,
+ true);
+ if (!mt->definer) {
+ mlx5hws_err(ctx, "Failed to create match definer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ mlx5hws_definer_free(ctx, mt->definer);
+}
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ struct mlx5hws_definer match_layout = {0};
+ int ret;
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ return ret;
+ }
+
+ /* Create the definer needed for exact match */
+ ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to init match definers\n");
+ goto free_fc;
+ }
+
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+ return ret;
+}
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ hws_definer_mt_match_uninit(ctx, mt);
+ kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
new file mode 100644
index 000000000000..2f6a7df4021c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEFINER_H_
+#define MLX5HWS_DEFINER_H_
+
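+/* A definer selects up to DW_SELECTORS dwords and BYTE_SELECTORS single
+ * bytes out of the packet header layout; the selected bits, masked by the
+ * match tag, are what the hardware compares on lookup.
+ */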
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+enum mlx5hws_definer_fname {
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+ MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+ MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+ MLX5HWS_DEFINER_FNAME_GTP_TEID,
+ MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+ MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+ MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+ MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+ MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+ MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+ MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+ MLX5HWS_DEFINER_FNAME_REG_0,
+ MLX5HWS_DEFINER_FNAME_REG_1,
+ MLX5HWS_DEFINER_FNAME_REG_2,
+ MLX5HWS_DEFINER_FNAME_REG_3,
+ MLX5HWS_DEFINER_FNAME_REG_4,
+ MLX5HWS_DEFINER_FNAME_REG_5,
+ MLX5HWS_DEFINER_FNAME_REG_6,
+ MLX5HWS_DEFINER_FNAME_REG_7,
+ MLX5HWS_DEFINER_FNAME_REG_8,
+ MLX5HWS_DEFINER_FNAME_REG_9,
+ MLX5HWS_DEFINER_FNAME_REG_10,
+ MLX5HWS_DEFINER_FNAME_REG_11,
+ MLX5HWS_DEFINER_FNAME_REG_A,
+ MLX5HWS_DEFINER_FNAME_REG_B,
+ MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+ MLX5HWS_DEFINER_FNAME_GRE_C,
+ MLX5HWS_DEFINER_FNAME_GRE_K,
+ MLX5HWS_DEFINER_FNAME_GRE_S,
+ MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+ MLX5HWS_DEFINER_FNAME_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+ MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+ MLX5HWS_DEFINER_FNAME_IB_L4_A,
+ MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+ MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+enum mlx5hws_definer_match_criteria {
+ MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+enum mlx5hws_definer_type {
+ MLX5HWS_DEFINER_TYPE_MATCH,
+ MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
+enum mlx5hws_definer_match_flag {
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
+
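+/* Field copy (fc) descriptor: describes how one match field is copied from
+ * the source match parameter (s_* offsets) into the destination definer tag
+ * via the tag_set()/tag_mask_set() callbacks.
+ */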
+struct mlx5hws_definer_fc {
+ struct mlx5hws_context *ctx;
+ /* Source */
+ u32 s_byte_off;
+ int s_bit_off;
+ u32 s_bit_mask;
+ /* Destination */
+ u32 byte_off;
+ int bit_off;
+ u32 bit_mask;
+ enum mlx5hws_definer_fname fname;
+ void (*tag_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+ void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+ u8 reserved_at_40[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encap_type[0x2];
+ u8 port_number[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 l4_type[0x4];
+ u8 reserved_at_64[0x2];
+ u8 ipsec_layer[0x2];
+ u8 l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 ip_fragmented[0x1];
+ u8 functional_lb[0x1];
+};
+
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 reserved_at_3[0x3];
+ u8 port_number[0x2];
+ u8 sl[0x4];
+ u8 qp_type[0x2];
+ u8 lnh[0x2];
+ u8 dlid[0x10];
+ u8 vl[0x4];
+ u8 lrh_packet_length[0xc];
+ u8 slid[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+ u8 ip_version[0x4];
+ u8 ihl[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 time_to_live_hop_limit[0x8];
+ u8 protocol_next_header[0x8];
+ u8 identification[0x10];
+ union {
+ u8 ipv4_frag[0x10];
+ struct {
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+ };
+ };
+ u8 ipv4_total_length[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_60[0xc];
+ u8 flow_label[0x14];
+ u8 packet_length[0x10];
+ u8 ipv6_payload_length[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+ u8 data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 ip_fragmented[0x1];
+ u8 tcp_ns[0x1];
+ union {
+ u8 tcp_flags[0x8];
+ struct {
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ };
+ };
+ u8 first_fragment[0x1];
+ u8 reserved_at_31[0xf];
+};
+
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_e[0x1];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+ u8 force_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+};
+
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+ u8 opcode[0x8];
+ u8 qp[0x18];
+ u8 se[0x1];
+ u8 migreq[0x1];
+ u8 ackreq[0x1];
+ u8 fecn[0x1];
+ u8 becn[0x1];
+ u8 bth[0x1];
+ u8 deth[0x1];
+ u8 dcceth[0x1];
+ u8 reserved_at_28[0x2];
+ u8 pad_count[0x2];
+ u8 tver[0x4];
+ u8 p_key[0x10];
+ u8 reserved_at_40[0x8];
+ u8 deth_source_qp[0x18];
+};
+
+enum mlx5hws_integrity_ok1_bits {
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+ MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+ MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+ MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+ MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+struct mlx5_ifc_definer_hl_oks1_bits {
+ union {
+ u8 oks1_bits[0x20];
+ struct {
+ u8 second_ipv4_checksum_ok[0x1];
+ u8 second_l4_checksum_ok[0x1];
+ u8 first_ipv4_checksum_ok[0x1];
+ u8 first_l4_checksum_ok[0x1];
+ u8 second_l3_ok[0x1];
+ u8 second_l4_ok[0x1];
+ u8 first_l3_ok[0x1];
+ u8 first_l4_ok[0x1];
+ u8 flex_parser7_steering_ok[0x1];
+ u8 flex_parser6_steering_ok[0x1];
+ u8 flex_parser5_steering_ok[0x1];
+ u8 flex_parser4_steering_ok[0x1];
+ u8 flex_parser3_steering_ok[0x1];
+ u8 flex_parser2_steering_ok[0x1];
+ u8 flex_parser1_steering_ok[0x1];
+ u8 flex_parser0_steering_ok[0x1];
+ u8 second_ipv6_extension_header_vld[0x1];
+ u8 first_ipv6_extension_header_vld[0x1];
+ u8 l3_tunneling_ok[0x1];
+ u8 l2_tunneling_ok[0x1];
+ u8 second_tcp_ok[0x1];
+ u8 second_udp_ok[0x1];
+ u8 second_ipv4_ok[0x1];
+ u8 second_ipv6_ok[0x1];
+ u8 second_l2_ok[0x1];
+ u8 vxlan_ok[0x1];
+ u8 gre_ok[0x1];
+ u8 first_tcp_ok[0x1];
+ u8 first_udp_ok[0x1];
+ u8 first_ipv4_ok[0x1];
+ u8 first_ipv6_ok[0x1];
+ u8 first_l2_ok[0x1];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_oks2_bits {
+ u8 reserved_at_0[0xa];
+ u8 second_mpls_ok[0x1];
+ u8 second_mpls4_s_bit[0x1];
+ u8 second_mpls4_qualifier[0x1];
+ u8 second_mpls3_s_bit[0x1];
+ u8 second_mpls3_qualifier[0x1];
+ u8 second_mpls2_s_bit[0x1];
+ u8 second_mpls2_qualifier[0x1];
+ u8 second_mpls1_s_bit[0x1];
+ u8 second_mpls1_qualifier[0x1];
+ u8 second_mpls0_s_bit[0x1];
+ u8 second_mpls0_qualifier[0x1];
+ u8 first_mpls_ok[0x1];
+ u8 first_mpls4_s_bit[0x1];
+ u8 first_mpls4_qualifier[0x1];
+ u8 first_mpls3_s_bit[0x1];
+ u8 first_mpls3_qualifier[0x1];
+ u8 first_mpls2_s_bit[0x1];
+ u8 first_mpls2_qualifier[0x1];
+ u8 first_mpls1_s_bit[0x1];
+ u8 first_mpls1_qualifier[0x1];
+ u8 first_mpls0_s_bit[0x1];
+ u8 first_mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_definer_hl_voq_bits {
+ u8 reserved_at_0[0x18];
+ u8 ecn_ok[0x1];
+ u8 congestion[0x1];
+ u8 profile[0x2];
+ u8 internal_prio[0x4];
+};
+
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+ u8 source_address[0x20];
+ u8 destination_address[0x20];
+};
+
+struct mlx5_ifc_definer_hl_random_number_bits {
+ u8 random_number[0x10];
+ u8 reserved[0x10];
+};
+
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+ u8 ipv6_address_127_96[0x20];
+ u8 ipv6_address_95_64[0x20];
+ u8 ipv6_address_63_32[0x20];
+ u8 ipv6_address_31_0[0x20];
+};
+
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+ union {
+ struct {
+ u8 icmp_dw1[0x20];
+ u8 icmp_dw2[0x20];
+ u8 icmp_dw3[0x20];
+ };
+ struct {
+ u8 tcp_seq[0x20];
+ u8 tcp_ack[0x20];
+ u8 tcp_win_urg[0x20];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+ u8 tunnel_header_1[0x20];
+ u8 tunnel_header_2[0x20];
+ u8 tunnel_header_3[0x20];
+};
+
+struct mlx5_ifc_definer_hl_ipsec_bits {
+ u8 spi[0x20];
+ u8 sequence_number[0x20];
+ u8 reserved[0x10];
+ u8 ipsec_syndrome[0x8];
+ u8 next_header[0x8];
+};
+
+struct mlx5_ifc_definer_hl_metadata_bits {
+ u8 metadata_to_cqe[0x20];
+ u8 general_purpose[0x20];
+ u8 acomulated_hash[0x20];
+};
+
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+ u8 flex_parser_7[0x20];
+ u8 flex_parser_6[0x20];
+ u8 flex_parser_5[0x20];
+ u8 flex_parser_4[0x20];
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parser_1[0x20];
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_definer_hl_registers_bits {
+ u8 register_c_10[0x20];
+ u8 register_c_11[0x20];
+ u8 register_c_8[0x20];
+ u8 register_c_9[0x20];
+ u8 register_c_6[0x20];
+ u8 register_c_7[0x20];
+ u8 register_c_4[0x20];
+ u8 register_c_5[0x20];
+ u8 register_c_2[0x20];
+ u8 register_c_3[0x20];
+ u8 register_c_0[0x20];
+ u8 register_c_1[0x20];
+};
+
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
+struct mlx5_ifc_definer_hl_bits {
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+ struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+ struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+ struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+ struct mlx5_ifc_definer_hl_oks1_bits oks1;
+ struct mlx5_ifc_definer_hl_oks2_bits oks2;
+ struct mlx5_ifc_definer_hl_voq_bits voq;
+ u8 reserved_at_480[0x380];
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+ u8 unsupported_dest_ib_l3[0x80];
+ u8 unsupported_source_ib_l3[0x80];
+ u8 unsupported_udp_misc_outer[0x20];
+ u8 unsupported_udp_misc_inner[0x20];
+ struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+ struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+ u8 unsupported_config_headers_outer[0x80];
+ u8 unsupported_config_headers_inner[0x80];
+ struct mlx5_ifc_definer_hl_random_number_bits random_number;
+ struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+ struct mlx5_ifc_definer_hl_metadata_bits metadata;
+ u8 unsupported_utc_timestamp[0x40];
+ u8 unsupported_free_running_timestamp[0x40];
+ struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+ struct mlx5_ifc_definer_hl_registers_bits registers;
+ /* Reserved in case the header layout is extended on future HW */
+ u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+ MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+struct mlx5_ifc_header_gtp_bits {
+ u8 version[0x3];
+ u8 proto_type[0x1];
+ u8 reserved1[0x1];
+ union {
+ u8 msg_flags[0x3];
+ struct {
+ u8 ext_hdr_flag[0x1];
+ u8 seq_num_flag[0x1];
+ u8 pdu_flag[0x1];
+ };
+ };
+ u8 msg_type[0x8];
+ u8 msg_len[0x8];
+ u8 teid[0x20];
+};
+
+struct mlx5_ifc_header_opt_gtp_bits {
+ u8 seq_num[0x10];
+ u8 pdu_num[0x8];
+ u8 next_ext_hdr_type[0x8];
+};
+
+struct mlx5_ifc_header_gtp_psc_bits {
+ u8 len[0x8];
+ u8 pdu_type[0x4];
+ u8 flags[0x4];
+ u8 qfi[0x8];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_ipv6_vtc_bits {
+ u8 version[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+ u8 next_hdr[0x8];
+ u8 hdr_len[0x8];
+ u8 type[0x8];
+ u8 segments_left[0x8];
+ union {
+ u8 flags[0x20];
+ struct {
+ u8 last_entry[0x8];
+ u8 flag[0x8];
+ u8 tag[0x10];
+ };
+ };
+};
+
+struct mlx5_ifc_header_vxlan_bits {
+ u8 flags[0x8];
+ u8 reserved1[0x18];
+ u8 vni[0x18];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_vxlan_gpe_bits {
+ u8 flags[0x8];
+ u8 rsvd0[0x10];
+ u8 protocol[0x8];
+ u8 vni[0x18];
+ u8 rsvd1[0x8];
+};
+
+struct mlx5_ifc_header_gre_bits {
+ union {
+ u8 c_rsvd0_ver[0x10];
+ struct {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 reserved_at_4[0x9];
+ u8 version[0x3];
+ };
+ };
+ u8 gre_protocol[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_30[0x10];
+};
+
+struct mlx5_ifc_header_geneve_bits {
+ union {
+ u8 ver_opt_len_o_c_rsvd[0x10];
+ struct {
+ u8 version[0x2];
+ u8 opt_len[0x6];
+ u8 o_flag[0x1];
+ u8 c_flag[0x1];
+ u8 reserved_at_a[0x6];
+ };
+ };
+ u8 protocol_type[0x10];
+ u8 vni[0x18];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_header_geneve_opt_bits {
+ u8 class[0x10];
+ u8 type[0x8];
+ u8 reserved[0x3];
+ u8 len[0x5];
+};
+
+struct mlx5_ifc_header_icmp_bits {
+ union {
+ u8 icmp_dw1[0x20];
+ struct {
+ u8 type[0x8];
+ u8 code[0x8];
+ u8 cksum[0x10];
+ };
+ };
+ union {
+ u8 icmp_dw2[0x20];
+ struct {
+ u8 ident[0x10];
+ u8 seq_nb[0x10];
+ };
+ };
+};
+
+struct mlx5hws_definer {
+ enum mlx5hws_definer_type type;
+ u8 dw_selector[DW_SELECTORS];
+ u8 byte_selector[BYTE_SELECTORS];
+ struct mlx5hws_rule_match_tag mask;
+ u32 obj_id;
+};
+
+struct mlx5hws_definer_cache {
+ struct list_head list_head;
+};
+
+struct mlx5hws_definer_cache_item {
+ struct mlx5hws_definer definer;
+ u32 refcount;
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+ return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag);
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
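+/* Typical flow (sketch, per the implementation in mlx5hws_definer.c):
+ * mlx5hws_definer_mt_init() calculates the layout and creates the match
+ * definer for a template; per-rule tags are then built with
+ * mlx5hws_definer_create_tag() and the template is released with
+ * mlx5hws_definer_mt_uninit().
+ */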
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b);
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer);
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz);
+
+#endif /* MLX5HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
new file mode 100644
index 000000000000..5643be1cd5bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_INTERNAL_H_
+#define MLX5HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "mlx5hws_prm.h"
+#include "mlx5hws.h"
+#include "mlx5hws_pool.h"
+#include "mlx5hws_vport.h"
+#include "mlx5hws_context.h"
+#include "mlx5hws_table.h"
+#include "mlx5hws_send.h"
+#include "mlx5hws_rule.h"
+#include "mlx5hws_cmd.h"
+#include "mlx5hws_action.h"
+#include "mlx5hws_definer.h"
+#include "mlx5hws_matcher.h"
+#include "mlx5hws_debug.h"
+#include "mlx5hws_pat_arg.h"
+#include "mlx5hws_bwc.h"
+#include "mlx5hws_bwc_complex.h"
+
+#define W_SIZE 2
+#define DW_SIZE 4
+#define BITS_IN_BYTE 8
+#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+#define MLX5HWS_ACTION_STE_IDX_ANY 0
+
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+ if (unlikely(!size)) {
+ pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+ return true;
+ }
+
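+ /* A buffer is all zeros iff its first byte is zero and every byte
+ * equals the next one (memcmp of the buffer against itself shifted
+ * by one byte).
+ */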
+ return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* MLX5HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
new file mode 100644
index 000000000000..1964261415aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_matcher_rtc_type {
+ HWS_MATCHER_RTC_TYPE_MATCH,
+ HWS_MATCHER_RTC_TYPE_STE_ARRAY,
+ HWS_MATCHER_RTC_TYPE_MAX,
+};
+
+static const char * const mlx5hws_matcher_rtc_type_str[] = {
+ [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
+ [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
+ [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
+};
+
+static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
+{
+ if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
+ rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
+ return mlx5hws_matcher_rtc_type_str[rtc_type];
+}
+
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+ /* Collision table concatenation is done only for large rule tables */
+ return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+ if (hws_matcher_requires_col_tbl(log_num_of_rules))
+ return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+ /* For small rule tables we use a single deep table to assure insertion */
+ return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
+
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *prev = NULL;
+ struct mlx5hws_matcher *next = NULL;
+ struct mlx5hws_matcher *tmp_matcher;
+ int ret;
+
+ /* Find location in matcher list */
+ if (list_empty(&tbl->matchers_list)) {
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ goto connect;
+ }
+
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+ if (tmp_matcher->attr.priority > matcher->attr.priority) {
+ next = tmp_matcher;
+ break;
+ }
+ prev = tmp_matcher;
+ }
+
+ if (next)
+ /* insert before next */
+ list_add_tail(&matcher->list_node, &next->list_node);
+ else
+ /* insert after prev */
+ list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+ if (next) {
+ /* Connect to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ matcher->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+ goto remove_from_list;
+ }
+ } else {
+ /* Connect last matcher to next miss_tbl if exists */
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n");
+ goto remove_from_list;
+ }
+ }
+
+ /* Connect to previous FT */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ prev ? prev->end_ft_id : tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+ goto remove_from_list;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+ goto remove_from_list;
+ }
+
+ if (!prev) {
+ /* New first matcher: update tables whose miss points to this table */
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+ goto remove_from_list;
+ }
+ }
+
+ return 0;
+
+remove_from_list:
+ list_del_init(&matcher->list_node);
+ return ret;
+}
+
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *next = NULL, *prev = NULL;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 prev_ft_id = tbl->ft_id;
+ int ret;
+
+ if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+ prev = list_prev_entry(matcher, list_node);
+ prev_ft_id = prev->end_ft_id;
+ }
+
+ if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (next) {
+ /* Connect previous end FT to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ prev_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+ goto matcher_reconnect;
+ }
+ } else {
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ /* Removing the first matcher, update connected miss tables if they exist */
+ if (prev_ft_id == tbl->ft_id) {
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+ goto matcher_reconnect;
+ }
+
+ return 0;
+
+matcher_reconnect:
+ if (list_empty(&tbl->matchers_list) || !prev)
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ else
+ /* insert after prev matcher */
+ list_add(&matcher->list_node, &prev->list_node);
+
+ return ret;
+}
+
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ bool is_mirror)
+{
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+ bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
+
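+ /* If the flow source is known in advance (wire only or vport only),
+ * the RTC on the side that can never see that traffic is created
+ * with zero size and depth.
+ */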
+ if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+ (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+ /* Optimize FDB RTC */
+ rtc_attr->log_size = 0;
+ rtc_attr->log_depth = 0;
+ } else {
+ /* Keep original values */
+ rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
+ rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
+ }
+}
+
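+/* Create the RTC(s) backing this matcher: rtc_0 always exists, and for
+ * FDB tables a mirror rtc_1 is created as well. Match RTCs are sized by
+ * the matcher table attributes, while action STE array RTCs are accessed
+ * by offset and use the trivial (always hit) definer.
+ */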
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_match_template *mt = matcher->mt;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = &matcher->match_ste.rtc_0_id;
+ rtc_1_id = &matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
+ }
+ }
+
+ /* Match pool requires implicit allocation */
+ ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ return ret;
+ }
+ break;
+
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ rtc_0_id = &action_ste->rtc_0_id;
+ rtc_1_id = &action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ attr->table.sz_row_log;
+ rtc_attr.log_size = ste->order;
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* The action STEs use the default always hit definer */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ break;
+
+ default:
+ mlx5hws_err(ctx, "HWS Invalid RTC type\n");
+ return -EINVAL;
+ }
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[tbl->type];
+ default_stc = ctx->common_res[tbl->type].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto free_ste;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto destroy_rtc_0;
+ }
+ }
+
+ return 0;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+free_ste:
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+ return ret;
+}
+
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 rtc_0_id, rtc_1_id;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = matcher->match_ste.rtc_0_id;
+ rtc_1_id = matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ break;
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+ rtc_0_id = action_ste->rtc_0_id;
+ rtc_1_id = action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ break;
+ default:
+ return;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+}
+
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
+ struct mlx5hws_matcher *matcher)
+{
+ switch (matcher->attr.optimize_flow_src) {
+ case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
+ break;
+ case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ bool valid;
+ int ret;
+
+ valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+ if (!valid) {
+ mlx5hws_err(ctx, "Invalid combination in action template\n");
+ return -EINVAL;
+ }
+
+ /* Process action template to setters */
+ ret = mlx5hws_action_template_process(at);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to process action template\n");
+ return ret;
+ }
+
+ return 0;
+}
+
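+/* Keep the source matcher's action STE resources alive across a resize:
+ * rules may still reference them until they are moved, so ownership of
+ * the RTCs/STC/pools is transferred to the destination matcher and only
+ * released when the destination is destroyed (see
+ * hws_matcher_resize_uninit()).
+ */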
+static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
+ if (!resize_data)
+ return -ENOMEM;
+
+ resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+
+ resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
+ resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
+ resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
+ resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
+ src_matcher->action_ste[0].pool :
+ NULL;
+ resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
+ resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
+ resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
+ resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
+ src_matcher->action_ste[1].pool :
+ NULL;
+
+ /* Place the new resized matcher on the dst matcher's list */
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+
+ /* Move all the previous resized matchers to the dst matcher's list */
+ while (!list_empty(&src_matcher->resize_data)) {
+ resize_data = list_first_entry(&src_matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+ }
+
+ return 0;
+}
+
+static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ if (!mlx5hws_matcher_is_resizable(matcher))
+ return;
+
+ while (!list_empty(&matcher->resize_data)) {
+ resize_data = list_first_entry(&matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+
+ if (resize_data->max_stes) {
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[1].stc);
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[0].stc);
+
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_1_id);
+ }
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_0_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_0_id);
+ if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
+ mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
+ mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
+ }
+ }
+
+ kfree(resize_data);
+ }
+}
+
+static int
+hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ /* Allocate action STE mempool */
+ pool_attr.table_type = tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+ action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_ste->pool) {
+ mlx5hws_err(ctx, "Failed to create action ste pool\n");
+ return -EINVAL;
+ }
+
+ /* Allocate action RTC */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action RTC\n");
+ goto free_ste_pool;
+ }
+
+ /* Allocate STC for jumps to STE */
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = action_ste->ste;
+ stc_attr.ste_table.ste_pool = action_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
+ &action_ste->stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
+ goto free_rtc;
+ }
+
+ return 0;
+
+free_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+free_ste_pool:
+ mlx5hws_pool_destroy(action_ste->pool);
+ return ret;
+}
+
+static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ if (!action_ste->max_stes ||
+ matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
+ mlx5hws_matcher_is_in_resize(matcher))
+ return;
+
+ mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ mlx5hws_pool_destroy(action_ste->pool);
+}
+
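+/* Compute how many action STEs each action template needs. Per the
+ * required_stes computation below, one STE is saved whenever the match
+ * STE is not jumbo or the template only terminates.
+ */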
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* There are no additional STEs required for matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste[0].max_stes = max_stes;
+ matcher->action_ste[1].max_stes = max_stes;
+
+ ret = hws_matcher_bind_at_idx(matcher, 0);
+ if (ret)
+ return ret;
+
+ ret = hws_matcher_bind_at_idx(matcher, 1);
+ if (ret)
+ goto free_at_0;
+
+ return 0;
+
+free_at_0:
+ hws_matcher_unbind_at_idx(matcher, 0);
+ return ret;
+}
+
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_unbind_at_idx(matcher, 1);
+ hws_matcher_unbind_at_idx(matcher, 0);
+}
+
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ int ret;
+
+ /* Calculate match, range and hash definers */
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+ ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+ return ret;
+ }
+ }
+
+ /* Create an STE pool per matcher */
+ pool_attr.table_type = matcher->tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
+ pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+
+ matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!matcher->match_ste.pool) {
+ mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
+ ret = -EOPNOTSUPP;
+ goto uninit_match_definer;
+ }
+
+ return 0;
+
+uninit_match_definer:
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+ return ret;
+}
+
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_pool_destroy(matcher->match_ste.pool);
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+}
+
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ switch (attr->insert_mode) {
+ case MLX5HWS_MATCHER_INSERT_BY_HASH:
+ if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+ if (attr->table.sz_col_log) {
+ mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ if (!caps->rtc_hash_split_table) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ if (!caps->rtc_linear_lookup_table ||
+ !IS_BIT_SET(caps->access_index_mode,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (hws_matcher_validate_insert_mode(caps, matcher))
+ return -EOPNOTSUPP;
+
+ if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+ mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Convert number of rules to the required depth */
+ if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
+ attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+
+ matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+
+ return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+ int ret;
+
+ /* Select and create the definers for current matcher */
+ ret = hws_matcher_bind_mt(matcher);
+ if (ret)
+ return ret;
+
+ /* Calculate and verify action combination */
+ ret = hws_matcher_bind_at(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Create matcher end flow table anchor */
+ ret = hws_matcher_create_end_ft(matcher);
+ if (ret)
+ goto unbind_at;
+
+ /* Allocate the RTC for the new matcher */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ if (ret)
+ goto destroy_end_ft;
+
+ /* Connect the matcher to the matcher list */
+ ret = hws_matcher_connect(matcher);
+ if (ret)
+ goto destroy_rtc;
+
+ return 0;
+
+destroy_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+destroy_end_ft:
+ hws_matcher_destroy_end_ft(matcher);
+unbind_at:
+ hws_matcher_unbind_at(matcher);
+unbind_mt:
+ hws_matcher_unbind_mt(matcher);
+ return ret;
+}
+
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_resize_uninit(matcher);
+ hws_matcher_disconnect(matcher);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_end_ft(matcher);
+ hws_matcher_unbind_at(matcher);
+ hws_matcher_unbind_mt(matcher);
+}
+
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_matcher *col_matcher;
+ int ret;
+
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return 0;
+
+ if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ return 0;
+
+ col_matcher = kzalloc(sizeof(*col_matcher), GFP_KERNEL);
+ if (!col_matcher)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&col_matcher->resize_data);
+
+ col_matcher->tbl = matcher->tbl;
+ col_matcher->mt = matcher->mt;
+ col_matcher->at = matcher->at;
+ col_matcher->num_of_at = matcher->num_of_at;
+ col_matcher->num_of_mt = matcher->num_of_mt;
+ col_matcher->attr.priority = matcher->attr.priority;
+ col_matcher->flags = matcher->flags;
+ col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+ col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+ col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+ col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
+ col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+
+ col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+
+ ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ ret = hws_matcher_create_and_connect(col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ matcher->col_matcher = col_matcher;
+
+ return 0;
+
+free_col_matcher:
+ kfree(col_matcher);
+ mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+ return ret;
+}
+
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return;
+
+ if (matcher->col_matcher) {
+ hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+ kfree(matcher->col_matcher);
+ }
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret;
+
+ INIT_LIST_HEAD(&matcher->resize_data);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate matcher resource and connect to the packet pipe */
+ ret = hws_matcher_create_and_connect(matcher);
+ if (ret)
+ goto unlock_err;
+
+ /* Create additional matcher for collision handling */
+ ret = hws_matcher_create_col_matcher(matcher);
+ if (ret)
+ goto destroy_and_disconnect;
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+destroy_and_disconnect:
+ hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mutex_lock(&ctx->ctrl_lock);
+ hws_matcher_destroy_col_matcher(matcher);
+ hws_matcher_destroy_and_disconnect(matcher);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u32 required_stes;
+ int ret;
+
+ if (!matcher->attr.max_num_of_at_attach) {
+ mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
+ matcher->num_of_at);
+ return -EOPNOTSUPP;
+ }
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret)
+ return ret;
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
+ required_stes,
+ matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ return -ENOMEM;
+ }
+
+ matcher->at[matcher->num_of_at] = *at;
+ matcher->num_of_at += 1;
+ matcher->attr.max_num_of_at_attach -= 1;
+
+ if (matcher->col_matcher)
+ matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+ return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret = 0;
+ int i;
+
+ if (!num_of_mt || !num_of_at) {
+ mlx5hws_err(ctx, "Number of action/match template cannot be zero\n");
+ return -EOPNOTSUPP;
+ }
+
+ matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+ if (!matcher->mt)
+ return -ENOMEM;
+
+ matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
+ if (!matcher->at) {
+ mlx5hws_err(ctx, "Failed to allocate action template array\n");
+ ret = -ENOMEM;
+ goto free_mt;
+ }
+
+ for (i = 0; i < num_of_mt; i++)
+ matcher->mt[i] = *mt[i];
+
+ for (i = 0; i < num_of_at; i++)
+ matcher->at[i] = *at[i];
+
+ matcher->num_of_mt = num_of_mt;
+ matcher->num_of_at = num_of_at;
+
+ return 0;
+
+free_mt:
+ kfree(matcher->mt);
+ return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+ kfree(matcher->at);
+ kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *matcher;
+ int ret;
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ return NULL;
+
+ matcher->tbl = tbl;
+ matcher->attr = *attr;
+
+ ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_init(matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+ goto unset_templates;
+ }
+
+ return matcher;
+
+unset_templates:
+ hws_matcher_unset_templates(matcher);
+free_matcher:
+ kfree(matcher);
+ return NULL;
+}
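+
+/* Illustrative lifetime (a sketch; the action template "at" is assumed
+ * to come from the action template API, which is not part of this file):
+ *
+ *	mt = mlx5hws_match_template_create(ctx, match_param, sz, enable);
+ *	matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &attr);
+ *	...
+ *	mlx5hws_matcher_destroy(matcher);
+ *	mlx5hws_match_template_destroy(mt);
+ */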
+
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_uninit(matcher);
+ hws_matcher_unset_templates(matcher);
+ kfree(matcher);
+ return 0;
+}
+
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable)
+{
+ struct mlx5hws_match_template *mt;
+
+ mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return NULL;
+
+ mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!mt->match_param)
+ goto free_template;
+
+ memcpy(mt->match_param, match_param, match_param_sz);
+ mt->match_criteria_enable = match_criteria_enable;
+
+ return mt;
+
+free_template:
+ kfree(mt);
+ return NULL;
+}
+
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+ kfree(mt->match_param);
+ kfree(mt);
+ return 0;
+}
+
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+ int i;
+
+ if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+ mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+ return -EINVAL;
+ }
+
+ if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+ !mlx5hws_matcher_is_resizable(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+ mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+ mlx5hws_matcher_is_in_resize(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+ return -EINVAL;
+ }
+
+ /* Compare match templates - make sure the definers are equivalent */
+ if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+ mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+ return -EINVAL;
+ }
+
+ if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
+ dst_matcher->action_ste[0].max_stes) {
+ mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < src_matcher->num_of_mt; i++) {
+ if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+ dst_matcher->mt[i].definer)) {
+ mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ int ret = 0;
+
+ mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+ ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+ if (ret)
+ goto out;
+
+ src_matcher->resize_dst = dst_matcher;
+
+ ret = hws_matcher_resize_init(src_matcher);
+ if (ret)
+ src_matcher->resize_dst = NULL;
+
+out:
+ mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+ return ret;
+}
+
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+ mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(src_matcher != rule->matcher)) {
+ mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+ return -EINVAL;
+ }
+
+ return mlx5hws_rule_move_hws_add(rule, attr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
new file mode 100644
index 000000000000..125391d1a114
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_MATCHER_H_
+#define MLX5HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table to the main table with
+ * 3% of the main table rows will be enough resources for high insertion
+ * success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ x - 5
+ */
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
+/* Threshold to determine if the number of rules requires a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
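+
+/* Example: a matcher sized for 2^20 rules gets a collision table with
+ * sz_row_log = 20 - MLX5HWS_MATCHER_ASSURED_ROW_RATIO = 15, i.e. 2^15
+ * rows (~3% of the main table) at depth
+ * MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH.
+ */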
+
+enum mlx5hws_matcher_offset {
+ MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+ MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+enum mlx5hws_matcher_flags {
+ MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+ MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+};
+
+struct mlx5hws_match_template {
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_definer_fc *fc;
+ u32 *match_param;
+ u8 match_criteria_enable;
+ u16 fc_sz;
+};
+
+struct mlx5hws_matcher_match_ste {
+ struct mlx5hws_pool_chunk ste;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_action_ste {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+};
+
+struct mlx5hws_matcher_resize_data_node {
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_resize_data {
+ struct mlx5hws_matcher_resize_data_node action_ste[2];
+ u8 max_stes;
+ struct list_head list_node;
+};
+
+struct mlx5hws_matcher {
+ struct mlx5hws_table *tbl;
+ struct mlx5hws_matcher_attr attr;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at;
+ u8 num_of_at;
+ u8 num_of_mt;
+ /* enum mlx5hws_matcher_flags */
+ u8 flags;
+ u32 end_ft_id;
+ struct mlx5hws_matcher *col_matcher;
+ struct mlx5hws_matcher *resize_dst;
+ struct mlx5hws_matcher_match_ste match_ste;
+ struct mlx5hws_matcher_action_ste action_ste[2];
+ struct list_head list_node;
+ struct list_head resize_data;
+};
+
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+ return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+ return !!matcher->resize_dst;
+}
+
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+ return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+#endif /* MLX5HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
new file mode 100644
index 000000000000..e084a5cbf81f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+ /* Return the roundup of log2(data_size) */
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+ return MLX5HWS_ARG_CHUNK_SIZE_1;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+ return MLX5HWS_ARG_CHUNK_SIZE_2;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+ return MLX5HWS_ARG_CHUNK_SIZE_3;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+ return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+ return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+ return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+ return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+ return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+ u16 i, field;
+ u8 action_id;
+
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+ switch (action_id) {
+ case MLX5_MODIFICATION_TYPE_NOP:
+ field = MLX5_MODI_OUT_NONE;
+ break;
+
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ field = MLX5_GET(set_action_in, &actions[i], field);
+ break;
+
+ case MLX5_MODIFICATION_TYPE_COPY:
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+ break;
+
+ default:
+ /* Insert/Remove/Unknown actions require reparse */
+ return true;
+ }
+
+ /* The fields below can change the packet structure and require a reparse */
+ if (field == MLX5_MODI_OUT_ETHERTYPE ||
+ field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+ return true;
+ }
+
+ return false;
+}
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+ struct mlx5hws_pattern_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->ptrn_list);
+ mutex_init(&new_cache->lock);
+
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+ mutex_destroy(&cache->lock);
+ kfree(cache);
+}
+
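+/* Two patterns are considered equal when their action controls match:
+ * for SET/ADD actions the data value is rule-specific (supplied via
+ * modify-header arguments), so only the control word is compared, while
+ * COPY/ADD_FIELD actions are compared in full.
+ */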
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+ __be64 cur_actions[],
+ int num_of_actions,
+ __be64 actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(set_action_in, &actions[i], action_type);
+
+ if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+ action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ if (actions[i] != cur_actions[i])
+ return false;
+ } else {
+ /* Compare just the control, not the values */
+ if ((__force __be32)actions[i] !=
+ (__force __be32)cur_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+ list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+ if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+ (__be64 *)cached_pat->mh_data.data,
+ num_of_actions,
+ actions))
+ return cached_pat;
+ }
+
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+ if (cached_pattern) {
+ /* LRU: move it to be first in the list */
+ list_del_init(&cached_pattern->ptrn_list_node);
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount++;
+ }
+
+ return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+ u32 pattern_id,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+ if (!cached_pattern)
+ return NULL;
+
+ cached_pattern->mh_data.num_of_actions = num_of_actions;
+ cached_pattern->mh_data.pattern_id = pattern_id;
+ cached_pattern->mh_data.data =
+ kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!cached_pattern->mh_data.data)
+ goto free_cached_obj;
+
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount = 1;
+
+ return cached_pattern;
+
+free_cached_obj:
+ kfree(cached_pattern);
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+ u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+ list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+ if (cached_pattern->mh_data.pattern_id == ptrn_id)
+ return cached_pattern;
+ }
+
+ return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+ list_del_init(&cached_pattern->ptrn_list_node);
+
+ kfree(cached_pattern->mh_data.data);
+ kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ mutex_lock(&cache->lock);
+ cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+ pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+ goto out;
+ }
+
+ if (--cached_pattern->refcount)
+ goto out;
+
+ mlx5hws_pat_remove_pattern(cached_pattern);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+ mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern, size_t pattern_sz,
+ u32 *pattern_id)
+{
+ u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+ u32 ptrn_id = 0;
+ int ret = 0;
+
+ mutex_lock(&ctx->pattern_cache->lock);
+
+ cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+ num_of_actions,
+ pattern);
+ if (cached_pattern) {
+ *pattern_id = cached_pattern->mh_data.pattern_id;
+ goto out_unlock;
+ }
+
+ ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+ pattern_sz,
+ (u8 *)pattern,
+ &ptrn_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+ goto out_unlock;
+ }
+
+ cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+ ptrn_id,
+ num_of_actions,
+ pattern);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+ ret = -ENOMEM;
+ goto clean_pattern;
+ }
+
+ mutex_unlock(&ctx->pattern_cache->lock);
+ *pattern_id = ptrn_id;
+
+ return ret;
+
+clean_pattern:
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+out_unlock:
+ mutex_unlock(&ctx->pattern_cache->lock);
+ return ret;
+}
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+ void *comp_data,
+ u32 arg_idx)
+{
+ send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+ send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ send_attr->id = arg_idx;
+ send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+ num_of_actions);
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ int i, full_iter, leftover;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+ /* Each WQE can hold 64B of data; larger writes require multiple iterations */
+ full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+ leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+ for (i = 0; i < full_iter; i++) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, wqe_len);
+ send_attr.id = arg_idx++;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+ /* Move to next argument data */
+ arg_data += MLX5HWS_ARG_DATA_SIZE;
+ }
+
+ if (leftover) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, leftover);
+ send_attr.id = arg_idx;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+ }
+}
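+
+/* For example, a 136 byte argument write is posted as two full 64B data
+ * WQEs followed by an 8 byte leftover WQE, with arg_idx advancing by one
+ * for every full 64B chunk.
+ */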
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+
+ mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ret)
+ mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size)
+{
+ if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
+ arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
+ return false;
+ }
+ return true;
+}
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ u16 single_arg_log_sz;
+ u16 multi_arg_log_sz;
+ int ret;
+ u32 id;
+
+ single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+ multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+ if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+ mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ /* Alloc bulk of args */
+ ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+ return ret;
+ }
+
+ if (write_data) {
+ ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+ data, data_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed writing arg data\n");
+ mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+ return ret;
+ }
+ }
+
+ *arg_id = id;
+ return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+ mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+ int ret;
+
+ ret = mlx5hws_arg_create(ctx,
+ (u8 *)data,
+ data_sz,
+ log_bulk_sz,
+ write_data,
+ arg_id);
+ if (ret)
+ mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+ return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+ /* Field limitations should be checked here; for now, return OK */
+ return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+ u16 *src_field, u16 *dst_field)
+{
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ case MLX5_ACTION_TYPE_ADD:
+ *src_field = MLX5_GET(set_action_in, pattern, field);
+ *dst_field = INVALID_FIELD;
+ break;
+ case MLX5_ACTION_TYPE_COPY:
+ *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+ *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+ break;
+ default:
+ pr_warn("HWS: invalid modify header action type %d\n", action_type);
+ }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+ size_t i;
+
+ for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+ u8 action_type =
+ MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+ mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+ return false;
+ }
+ if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+ mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
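+/* Scan the pattern and pad it with NOPs where needed. The heuristic
+ * below inserts a NOP before the second action of a pair (odd index)
+ * when it depends on the previous action's source/destination field,
+ * presumably because such pairs are otherwise processed together. If
+ * the padded pattern would not fit in max_actions, the original pattern
+ * is kept unchanged.
+ */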
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nope_location, __be64 *new_pat)
+{
+ u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 src_field, dst_field;
+ u8 action_type;
+ size_t i, j;
+
+ *new_size = num_actions;
+ *nope_location = 0;
+
+ if (num_actions == 1)
+ return;
+
+ for (i = 0, j = 0; i < num_actions; i++, j++) {
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+ hws_action_modify_get_target_fields(action_type, &pattern[i],
+ &src_field, &dst_field);
+ if (i % 2) {
+ if (action_type == MLX5_ACTION_TYPE_COPY &&
+ (prev_src_field == src_field ||
+ prev_dst_field == dst_field)) {
+ /* Need a NOP */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ } else if (prev_src_field == src_field) {
+ /* Need a NOP; zero the slot before setting, as in the branch above */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ }
+ }
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+ /* Bail out if the new pattern no longer fits in max_actions */
+ if (j > max_actions) {
+ *new_size = num_actions;
+ *nope_location = 0;
+ return;
+ }
+
+ prev_src_field = src_field;
+ prev_dst_field = dst_field;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
new file mode 100644
index 000000000000..27ca93385b08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+ MLX5HWS_ARG_CHUNK_SIZE_1,
+ /* Keep MIN updated when changing */
+ MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+ MLX5HWS_ARG_CHUNK_SIZE_2,
+ MLX5HWS_ARG_CHUNK_SIZE_3,
+ MLX5HWS_ARG_CHUNK_SIZE_4,
+ MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
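+
+/* Each chunk size covers a power-of-two multiple of MLX5HWS_ARG_DATA_SIZE
+ * (64) bytes: e.g. data of 65..128 bytes maps to MLX5HWS_ARG_CHUNK_SIZE_2,
+ * i.e. two 64B argument slots (see mlx5hws_arg_data_size_to_arg_log_size()).
+ */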
+
+enum {
+ MLX5HWS_MODIFY_ACTION_SIZE = 8,
+ MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+struct mlx5hws_pattern_cache {
+ struct mutex lock; /* Protect pattern list */
+ struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+ struct {
+ u32 pattern_id;
+ u8 *data;
+ u16 num_of_actions;
+ } mh_data;
+ u32 refcount;
+ struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *modify_hdr_arg_id);
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern,
+ size_t pattern_sz,
+ u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+ u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
+ size_t *new_size, u32 *nope_location, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
new file mode 100644
index 000000000000..a8a63e3278be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool,
+ int resource_idx)
+{
+ hws_pool_free_one_resource(pool->resource[resource_idx]);
+ pool->resource[resource_idx] = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+ pool->mirror_resource[resource_idx] = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ goto free_resource;
+ }
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource[idx] = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource[idx] = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource[idx] = mirror_resource;
+ }
+
+ return 0;
+}
+
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *cur_bmp;
+
+ cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!cur_bmp)
+ return NULL;
+
+ bitmap_fill(cur_bmp, 1 << log_range);
+
+ return cur_bmp;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
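+/* Return the buddy allocator for the given index, lazily creating both
+ * the buddy and its backing device resources on first use. A newly
+ * created buddy is sized to max(pool->alloc_log_sz, order) so that the
+ * requested order always fits.
+ */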
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+ u32 order, bool *is_new_buddy)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ u32 new_buddy_size;
+
+ buddy = pool->db.buddy_manager->buddies[idx];
+ if (buddy)
+ return buddy;
+
+ new_buddy_size = max(pool->alloc_log_sz, order);
+ *is_new_buddy = true;
+ buddy = mlx5hws_buddy_create(new_buddy_size);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+ new_buddy_size, idx);
+ return NULL;
+ }
+
+ if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, new_buddy_size, idx);
+ mlx5hws_buddy_cleanup(buddy);
+ return NULL;
+ }
+
+ pool->db.buddy_manager->buddies[idx] = buddy;
+
+ return buddy;
+}
+
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+ int order,
+ u32 *buddy_idx,
+ int *seg)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ bool new_mem = false;
+ int ret = 0;
+ int i;
+
+ *seg = -1;
+
+ /* Find the next free place from the buddy array */
+ while (*seg == -1) {
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = hws_pool_buddy_get_next_buddy(pool, i,
+ order,
+ &new_mem);
+ if (!buddy) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ *seg = mlx5hws_buddy_alloc_mem(buddy, order);
+ if (*seg != -1)
+ goto found;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+ mlx5hws_err(pool->ctx,
+ "Fail to allocate seg for one resource pool\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (new_mem) {
+ /* We have a new memory pool, so there should be room for us */
+ mlx5hws_err(pool->ctx,
+ "No memory for order: %d with buddy no: %d\n",
+ order, i);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+found:
+ *buddy_idx = i;
+out:
+ return ret;
+}
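+
+/* A chunk of order n covers 1 << n contiguous entries, matching the
+ * buddy allocator granularity. For example (a sketch, mirroring the
+ * rule code in mlx5hws_rule.c), a rule needing max_stes action STEs
+ * requests:
+ *
+ *	ste.order = ilog2(roundup_pow_of_two(max_stes));
+ *	mlx5hws_pool_chunk_alloc(pool, &ste);
+ */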
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over the buddies and find next free slot */
+ ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = pool->db.buddy_manager->buddies[i];
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy_manager->buddies[i] = NULL;
+ }
+ }
+
+ kfree(pool->db.buddy_manager);
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+ pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+ if (!pool->db.buddy_manager)
+ return -ENOMEM;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+ bool new_buddy;
+
+ if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+ mlx5hws_err(pool->ctx,
+ "Failed allocating memory on create log_sz: %d\n", log_range);
+ kfree(pool->db.buddy_manager);
+ return -ENOMEM;
+ }
+ }
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+ u32 alloc_size, int idx)
+{
+ int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+ struct mlx5hws_pool_elements *elem;
+ u32 alloc_size;
+
+ alloc_size = pool->alloc_log_sz;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+
+ /* Sharing the same resource also means that all the elements are of size 1 */
+ if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+ !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+ /* Currently all chunks are of size 1 */
+ elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+ if (!elem->bitmap) {
+ mlx5hws_err(pool->ctx,
+ "Failed to create bitmap type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_elem;
+ }
+
+ elem->log_size = alloc_size - order;
+ }
+
+ if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_db;
+ }
+
+ pool->db.element_manager->elements[idx] = elem;
+
+ return elem;
+
+free_db:
+ bitmap_free(elem->bitmap);
+free_elem:
+ kfree(elem);
+ return NULL;
+}
+
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+ unsigned int segment, size;
+
+ size = 1 << elem->log_size;
+
+ segment = find_first_bit(elem->bitmap, size);
+ if (segment >= size) {
+ elem->is_full = true;
+ return -ENOMEM;
+ }
+
+ bitmap_clear(elem->bitmap, segment, 1);
+ *seg = segment;
+ return 0;
+}
+
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ elem = pool->db.element_manager->elements[0];
+ if (!elem)
+ elem = hws_pool_element_create_new_elem(pool, order, 0);
+ if (!elem)
+ goto err_no_elem;
+
+ if (hws_pool_element_find_seg(elem, seg) != 0) {
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+ }
+
+ *idx = 0;
+ elem->num_of_elements++;
+ return 0;
+
+err_no_elem:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ int ret, i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ if (!pool->resource[i]) {
+ ret = hws_pool_create_resource_on_index(pool, order, i);
+ if (ret)
+ goto err_no_res;
+ *idx = i;
+ *seg = 0; /* One memory slot in that element */
+ return 0;
+ }
+ }
+
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+
+err_no_res:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+ hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+}
+
+/* This memory management works as follows:
+ * - No memory is allocated up front.
+ * - When a new chunk is requested:
+ *	a resource is allocated and handed out.
+ * - When that chunk is freed:
+ *	the resource is freed as well.
+ */
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+ pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+ return 0;
+}
+
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_elements *elem,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ hws_pool_resource_free(pool, chunk->resource_idx);
+ kfree(elem);
+ pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ if (unlikely(chunk->resource_idx))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ elem = pool->db.element_manager->elements[chunk->resource_idx];
+ if (!elem) {
+ mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ bitmap_set(elem->bitmap, chunk->offset, 1);
+ elem->is_full = false;
+ elem->num_of_elements--;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+ !elem->num_of_elements)
+ hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_elements *elem;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ elem = pool->db.element_manager->elements[i];
+ if (elem) {
+ bitmap_free(elem->bitmap);
+ kfree(elem);
+ pool->db.element_manager->elements[i] = NULL;
+ }
+ }
+ kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - No memory is allocated up front.
+ * - When a new chunk is requested:
+ *	the first and only slot of memory/resource is allocated;
+ *	once it is exhausted, an error is returned.
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+ if (!pool->db.element_manager)
+ return -ENOMEM;
+
+ pool->p_db_uninit = &hws_onesize_element_db_uninit;
+ pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+ pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+ ret = hws_pool_general_element_db_init(pool);
+ else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+ ret = hws_pool_onesize_element_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_uninit(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+}
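+
+/* Typical caller flow (an illustrative sketch, not a verbatim caller):
+ *
+ *	struct mlx5hws_pool_chunk chunk = {0};
+ *	u32 obj_id;
+ *
+ *	chunk.order = 0;	(request a single object)
+ *	if (mlx5hws_pool_chunk_alloc(pool, &chunk))
+ *		return -ENOMEM;
+ *	obj_id = mlx5hws_pool_chunk_get_base_id(pool, &chunk) + chunk.offset;
+ *	...
+ *	mlx5hws_pool_chunk_free(pool, &chunk);
+ */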
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+ /* Select the resource DB type according to the pool flags */
+ if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+ else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ int i;
+
+ mutex_destroy(&pool->lock);
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+ if (pool->resource[i])
+ hws_pool_resource_free(pool, i);
+
+ hws_pool_db_uninit(pool);
+
+ kfree(pool);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
new file mode 100644
index 000000000000..621298b352b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ u32 resource_idx;
+ /* Internal offset, relative to base index */
+ int offset;
+ int order;
+};
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+ /* Only one resource in the pool */
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+ /* Resources are not shared between chunks */
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+ /* All objects are of the same size */
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+ /* Managed by buddy allocator */
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+ /* Allocate pool_type memory on pool creation */
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+ /* These values should be used by the caller */
+ MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+ MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+ MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Log of the allocation size used once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+ /* Used for allocating large chunks of memory; each element has its own resource in the FW */
+ MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+ /* One resource only; all elements have the same, fixed size */
+ MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+ /* Many resources; memory is allocated with the buddy mechanism */
+ MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+ struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+ u32 num_of_elements;
+ unsigned long *bitmap;
+ u32 log_size;
+ bool is_full;
+};
+
+struct mlx5hws_element_manager {
+ struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ struct mlx5hws_element_manager *element_manager;
+ struct mlx5hws_buddy_manager *buddy_manager;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_uninit_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ /* DB */
+ struct mlx5hws_pool_db db;
+ /* Functions */
+ mlx5hws_pool_uninit_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32
+mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->resource[chunk->resource_idx]->base_id;
+}
+
+static inline u32
+mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->mirror_resource[chunk->resource_idx]->base_id;
+}
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
new file mode 100644
index 000000000000..de92cecbeb92
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+ MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+ MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+ MLX5_MODIFICATION_TYPE_NOP = 0x6,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+ MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+ MLX5_MODIFICATION_TYPE_MAX,
+};
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+ MLX5_MODI_GTP_TEID = 0x6E,
+ MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+ MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_HASH_RESULT = 0x81,
+ MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+ MLX5_MODI_IN_MPLS_LABEL_1,
+ MLX5_MODI_IN_MPLS_LABEL_2,
+ MLX5_MODI_IN_MPLS_LABEL_3,
+ MLX5_MODI_IN_MPLS_LABEL_4,
+ MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+ MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+ MLX5_MODI_META_REG_C_8 = 0x8F,
+ MLX5_MODI_META_REG_C_9 = 0x90,
+ MLX5_MODI_META_REG_C_10 = 0x91,
+ MLX5_MODI_META_REG_C_11 = 0x92,
+ MLX5_MODI_META_REG_C_12 = 0x93,
+ MLX5_MODI_META_REG_C_13 = 0x94,
+ MLX5_MODI_META_REG_C_14 = 0x95,
+ MLX5_MODI_META_REG_C_15 = 0x96,
+ MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+ MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+ MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+ MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+ MLX5_MODI_OUT_ESP_SPI = 0x5E,
+ MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+ MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+ MLX5_MODI_INVALID = INT_MAX,
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+enum mlx5_ifc_rtc_update_mode {
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+enum mlx5_ifc_rtc_access_mode {
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+enum mlx5_ifc_rtc_ste_format {
+ MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+ MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+ MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+enum mlx5_ifc_rtc_reparse_mode {
+ MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+ MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+ MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+struct mlx5_ifc_rtc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 update_index_mode[0x2];
+ u8 reparse_mode[0x2];
+ u8 num_match_ste[0x4];
+ u8 pd[0x18];
+ u8 reserved_at_a0[0x9];
+ u8 access_index_mode[0x3];
+ u8 num_hash_definer[0x4];
+ u8 update_method[0x1];
+ u8 reserved_at_b1[0x2];
+ u8 log_depth[0x5];
+ u8 log_hash_size[0x8];
+ u8 ste_format_0[0x8];
+ u8 table_type[0x8];
+ u8 ste_format_1[0x8];
+ u8 reserved_at_d8[0x8];
+ u8 match_definer_0[0x20];
+ u8 stc_id[0x20];
+ u8 ste_table_base_id[0x20];
+ u8 ste_table_offset[0x20];
+ u8 reserved_at_160[0x8];
+ u8 miss_flow_table_id[0x18];
+ u8 match_definer_1[0x20];
+ u8 reserved_at_1a0[0x260];
+};
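+
+/* These layouts are accessed with the standard mlx5 MLX5_SET/MLX5_GET
+ * macros, e.g. (a sketch; the field values are examples only):
+ *
+ *	void *rtc_attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+ *
+ *	MLX5_SET(rtc, rtc_attr, update_index_mode,
+ *		 MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH);
+ *	MLX5_SET(rtc, rtc_attr, log_hash_size, 10);
+ */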
+
+enum mlx5_ifc_stc_action_type {
+ MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+ MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+ MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+ MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+ MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+ MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+ MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+ MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+ MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+ MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+ MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+ MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+ MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+enum mlx5_ifc_stc_reparse_mode {
+ MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+ MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+ MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+ u8 ste_obj_id[0x20];
+ u8 match_definer_id[0x20];
+ u8 reserved_at_40[0x3];
+ u8 log_hash_size[0x5];
+ u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+ u8 reserved_at_0[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+ u8 reserved_at_0[0x8];
+ u8 table_id[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+ u8 flow_counter_id[0x20];
+};
+
+enum {
+ MLX5_ASO_CT_NUM_PER_OBJ = 1,
+ MLX5_ASO_METER_NUM_PER_OBJ = 2,
+ MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+ MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+ u8 aso_object_id[0x20];
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+ u8 reserved_at_0[0x8];
+ u8 command[0x4];
+ u8 reserved_at_c[0x2];
+ u8 type[0x2];
+ u8 reserved_at_10[0xa];
+ u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+ u8 header_modify_pattern_id[0x20];
+ u8 header_modify_argument_id[0x20];
+};
+
+enum mlx5_ifc_header_anchors {
+ MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+ MLX5_HEADER_ANCHOR_MAC = 0x1,
+ MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+ MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_ESP = 0x08,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+ MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+ MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+ MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+ MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+ MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+struct mlx5_ifc_stc_ste_param_remove_bits {
+ u8 action_type[0x4];
+ u8 decap[0x1];
+ u8 reserved_at_5[0x5];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 remove_end_anchor[0x6];
+ u8 reserved_at_18[0x8];
+};
+
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+ u8 action_type[0x4];
+ u8 reserved_at_4[0x6];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 remove_offset[0x7];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_insert_bits {
+ u8 action_type[0x4];
+ u8 encap[0x1];
+ u8 inline_data[0x1];
+ u8 reserved_at_6[0x4];
+ u8 insert_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 insert_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 insert_size[0x7];
+ u8 insert_argument[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_vport_bits {
+ u8 eswitch_owner_vhca_id[0x10];
+ u8 vport_number[0x10];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_21[0x5f];
+};
+
+union mlx5_ifc_stc_param_bits {
+ struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+ struct mlx5_ifc_stc_ste_param_tir_bits tir;
+ struct mlx5_ifc_stc_ste_param_table_bits table;
+ struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+ struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+ struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+ struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+ struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+ struct mlx5_ifc_set_action_in_bits add;
+ struct mlx5_ifc_set_action_in_bits set;
+ struct mlx5_ifc_copy_action_in_bits copy;
+ struct mlx5_ifc_stc_ste_param_vport_bits vport;
+ struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+ struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+ struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+struct mlx5_ifc_stc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x46];
+ u8 reparse_mode[0x2];
+ u8 table_type[0x8];
+ u8 ste_action_offset[0x8];
+ u8 action_type[0x8];
+ u8 reserved_at_a0[0x60];
+ union mlx5_ifc_stc_param_bits stc_param;
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_ste_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x48];
+ u8 table_type[0x8];
+ u8 reserved_at_90[0x370];
+};
+
+struct mlx5_ifc_definer_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x50];
+ u8 format_id[0x10];
+ u8 reserved_at_60[0x60];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+ u8 reserved_at_120[0x20];
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+ u8 reserved_at_180[0x40];
+ u8 ctrl[0xa0];
+ u8 match_mask[0x160];
+};
+
+struct mlx5_ifc_arg_bits {
+ u8 rsvd0[0x88];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_header_modify_pattern_in_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 pattern_length[0x8];
+ u8 reserved_at_88[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+struct mlx5_ifc_create_rtc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_arg_bits arg;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+struct mlx5_ifc_generate_wqe_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mode[0x10];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x8];
+ u8 pdn[0x18];
+ u8 reserved_at_a0[0x160];
+ u8 wqe_ctrl[0x80];
+ u8 wqe_gta_ctrl[0x180];
+ u8 wqe_gta_data_0[0x200];
+ u8 wqe_gta_data_1[0x200];
+};
+
+struct mlx5_ifc_generate_wqe_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x1c0];
+ u8 cqe_data[0x200];
+};
+
+enum mlx5_access_aso_opc_mod {
+ ASO_OPC_MOD_IPSEC = 0x0,
+ ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+ ASO_OPC_MOD_POLICER = 0x2,
+ ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+ ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+ MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
new file mode 100644
index 000000000000..c79ee70edf03
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static void hws_rule_skip(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt,
+ u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
+{
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
+ *skip_rx = true;
+ } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ *skip_tx = true;
+ } else {
+ /* If no flow source was set for the current rule,
+ * check for a flow source in the matcher attributes.
+ */
+ if (matcher->attr.optimize_flow_src) {
+ *skip_tx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+ return;
+ }
+ }
+}
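+
+/* For example, a rule created with flow_source LOCAL_VPORT can never be
+ * hit by wire-originated traffic, so its RX copy is skipped; UPLINK is
+ * the mirror case and skips the TX copy.
+ */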
+
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ bool is_jumbo)
+{
+ struct mlx5hws_rule_match_tag *tag;
+
+ if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+ tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ }
+
+ if (is_jumbo)
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = attr->user_data;
+ dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0_id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1_id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+ } else {
+ pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+ }
+}
+
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+ if (rule->resize_info->rtc_0) {
+ ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+ ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+ }
+ if (rule->resize_info->rtc_1) {
+ ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+ ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+ }
+}
+
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5hws_rule_status rule_status_on_succ)
+{
+ enum mlx5hws_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = MLX5HWS_FLOW_OP_ERROR;
+ rule->status = MLX5HWS_RULE_STATUS_FAILED;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+ mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ bool is_update)
+{
+ if (!mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (likely(!is_update)) {
+ rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+ if (unlikely(!rule->resize_info)) {
+ pr_warn("HWS: resize info isn't allocated for rule\n");
+ return;
+ }
+
+ rule->resize_info->max_stes =
+ rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+ rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
+ rule->matcher->action_ste[0].pool :
+ NULL;
+ rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
+ rule->matcher->action_ste[1].pool :
+ NULL;
+ }
+
+ memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+ sizeof(rule->resize_info->ctrl_seg));
+ memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+ sizeof(rule->resize_info->data_seg));
+}
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+ if (mlx5hws_matcher_is_resizable(rule->matcher) &&
+ rule->resize_info) {
+ kfree(rule->resize_info);
+ rule->resize_info = NULL;
+ }
+}
+
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_match_template *mt = rule->matcher->mt;
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+ if (mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (is_jumbo)
+ memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+ /* nothing to do here */
+}
+
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+ ste_attr->wqe_tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ struct mlx5hws_rule_match_tag *tag =
+ (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ ste_attr->wqe_tag = tag;
+ }
+}
+
+static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_pool_chunk ste = {0};
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+ ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
+ ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
+ if (unlikely(ret)) {
+ mlx5hws_err(matcher->tbl->ctx,
+ "Failed to allocate STE for rule actions");
+ return ret;
+ }
+ rule->action_ste_idx = ste.offset;
+
+ return 0;
+}
+
+static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_pool_chunk ste = {0};
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+
+ if (mlx5hws_matcher_is_resizable(matcher)) {
+ /* Free the original action pool if rule was resized */
+ max_stes = rule->resize_info->max_stes;
+ pool = rule->resize_info->action_ste_pool[action_ste_selector];
+ } else {
+ max_stes = matcher->action_ste[action_ste_selector].max_stes;
+ pool = matcher->action_ste[action_ste_selector].pool;
+ }
+
+ /* This release is safe only when the rule match part was deleted */
+ ste.order = ilog2(roundup_pow_of_two(max_stes));
+ ste.offset = rule->action_ste_idx;
+
+ mlx5hws_pool_chunk_free(pool, &ste);
+}
+
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int action_ste_idx;
+ int ret;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 0);
+ if (unlikely(ret))
+ return ret;
+
+ action_ste_idx = rule->action_ste_idx;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 1);
+ if (unlikely(ret)) {
+ hws_rule_free_action_ste_idx(rule, 0);
+ return ret;
+ }
+
+ /* Both pools have to return the same index */
+ if (unlikely(rule->action_ste_idx != action_ste_idx)) {
+ pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
+{
+ if (rule->action_ste_idx > -1) {
+ hws_rule_free_action_ste_idx(rule, 1);
+ hws_rule_free_action_ste_idx(rule, 0);
+ }
+}
+
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ struct mlx5hws_actions_apply_data *apply,
+ bool is_update)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+ /* Init rule before reuse */
+ if (!is_update) {
+ /* In update we use these rtc's */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+ rule->action_ste_selector = 0;
+ } else {
+ rule->action_ste_selector = !rule->action_ste_selector;
+ }
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res[tbl->type];
+ apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->require_dep = 0;
+}
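+
+/* Note: on rule update the action STE selector is flipped (see
+ * hws_rule_create_init above), so updated actions land in the alternate
+ * action STE set while the HW may still be reading the old one.
+ */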
+
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ /* Save the old RTC IDs to be later used in match STE delete */
+ rule->resize_info->rtc_0 = rule->rtc_0;
+ rule->resize_info->rtc_1 = rule->rtc_1;
+ rule->resize_info->rule_idx = attr->rule_idx;
+
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->action_ste_selector = 0;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+ return mlx5hws_matcher_is_in_resize(rule->matcher) &&
+ rule->resize_info &&
+ rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ struct mlx5hws_actions_wqe_setter *setter;
+ struct mlx5hws_actions_apply_data apply;
+ struct mlx5hws_send_engine *queue;
+ u8 total_stes, action_stes;
+ bool is_update;
+ int i, ret;
+
+ is_update = !match_param;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5hws_send_engine_err(queue)))
+ return -EIO;
+
+ hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+ hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ if (action_stes) {
+ /* Allocate action STEs for rules that need more than match STE */
+ if (!is_update) {
+ ret = hws_rule_alloc_action_ste(rule, attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ }
+ /* Skip RX/TX based on the dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ /* Action STEs are written to a specific index last to first */
+ ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
+ for (i = total_stes; i-- > 0;) {
+ mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE.
+ * For hash split / linear lookup RTCs, packets reaching any STE
+ * will always match and perform the specified actions, which
+ * makes the tag irrelevant.
+ */
+ if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+ mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+ (u8 *)dep_wqe->wqe_data.action);
+ else if (is_update)
+ hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.direct_index = dep_wqe->direct_index;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ }
+
+ /* Back up the TAG on the rule for later deletion, and the resize info
+ * for moving rules to a new matcher; both only after insertion.
+ */
+ if (!is_update)
+ hws_rule_save_delete_info(rule, &ste_attr);
+
+ hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
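+
+/* To summarize the flow above: action STEs are written first, from the
+ * highest direct index downwards, and the match STE is written last, so
+ * the rule only becomes visible to the HW once its actions are in place.
+ */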
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ /* Rule failed now we can safely release action STEs */
+ mlx5hws_rule_free_action_ste(rule);
+
+ /* Clear complex tag */
+ hws_rule_clear_delete_info(rule);
+
+ /* Clear info that was saved for resizing */
+ mlx5hws_rule_clear_resize_info(rule);
+
+ /* If a rule that was not marked as burst (i.e. we must trigger the HW)
+ * has failed insertion, nothing was written to the WQ and the HW was
+ * never rung. In that case flush the pending dependent WQEs and ring
+ * the HW explicitly.
+ */
+ if (attr->burst)
+ return;
+
+ mlx5hws_send_all_dep_wqe(queue);
+ mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ return -EBUSY;
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (rule->skip_delete) {
+ /* Rule shouldn't be deleted in HW.
+ * Generate completion as if write succeeded, and we can
+ * safely release action STEs and clear resize info.
+ */
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_clear_resize_info(rule);
+ return 0;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = attr->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+ hws_rule_clear_delete_info(rule);
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data))
+ return -EINVAL;
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EINVAL;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+ /* Matcher in resize - new rules are not allowed */
+ return -EAGAIN;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+ !matcher->attr.optimize_using_rule_idx &&
+ !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EBUSY;
+
+ return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue_ptr,
+ void *user_data)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_engine *queue = queue_ptr;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.send_attr.user_data = user_data;
+ ste_attr.rtc_0 = rule->resize_info->rtc_0;
+ ste_attr.rtc_1 = rule->resize_info->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+ ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = rule->resize_info->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_move(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ ret = mlx5hws_send_engine_err(queue);
+ if (ret)
+ return ret;
+
+ hws_rule_move_init(rule, attr);
+ hws_rule_move_get_rtc(rule, &ste_attr);
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+ ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+ ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle)
+{
+ int ret;
+
+ rule_handle->matcher = matcher;
+
+ ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!(matcher->num_of_mt >= mt_idx) ||
+ !(matcher->num_of_at >= at_idx) ||
+ !match_param)) {
+ pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+ return -EINVAL;
+ }
+
+ ret = hws_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ match_param,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_destroy_hws(rule, attr);
+
+ return ret;
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_update(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_create_hws(rule,
+ attr,
+ 0,
+ NULL,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
new file mode 100644
index 000000000000..495cdd17e9f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+ MLX5HWS_STE_CTRL_SZ = 20,
+ MLX5HWS_ACTIONS_SZ = 12,
+ MLX5HWS_MATCH_TAG_SZ = 32,
+ MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+ MLX5HWS_RULE_STATUS_UNKNOWN,
+ MLX5HWS_RULE_STATUS_CREATING,
+ MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_DELETING,
+ MLX5HWS_RULE_STATUS_DELETED,
+ MLX5HWS_RULE_STATUS_FAILING,
+ MLX5HWS_RULE_STATUS_FAILED,
+};
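+
+/* Expected lifecycle (a sketch): UNKNOWN -> CREATING -> CREATED, then
+ * DELETING -> DELETED on removal; FAILING/FAILED are entered when a
+ * queue or completion error is detected.
+ */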
+
+enum mlx5hws_rule_move_state {
+ MLX5HWS_RULE_RESIZE_STATE_IDLE,
+ MLX5HWS_RULE_RESIZE_STATE_WRITING,
+ MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+ MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+ union {
+ u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+ struct {
+ u8 reserved[MLX5HWS_ACTIONS_SZ];
+ u8 match[MLX5HWS_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5hws_rule_resize_info {
+ struct mlx5hws_pool *action_ste_pool[2];
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 rule_idx;
+ u8 state;
+ u8 max_stes;
+ u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+ struct mlx5hws_matcher *matcher;
+ union {
+ struct mlx5hws_rule_match_tag tag;
+ struct mlx5hws_rule_resize_info *resize_info;
+ };
+ u32 rtc_0; /* The RTC into which the STE was inserted */
+ u32 rtc_1; /* The RTC into which the STE was inserted */
+ int action_ste_idx; /* STE array index */
+ u8 status; /* enum mlx5hws_rule_status */
+ u8 action_ste_selector; /* For rule update - which action STE is in use */
+ u8 pending_wqes;
+ bool skip_delete; /* For complex rules - another rule with same tag
+ * still exists, so don't actually delete this rule.
+ */
+};
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
new file mode 100644
index 000000000000..fb97a15c041a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
+ memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+ return &send_sq->dep_wqe[idx];
+}
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ queue->send_ring.send_sq.head_dep_idx--;
+}
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+ /* Fence first from previous depend WQEs */
+ ste_attr.send_attr.fence = 1;
+
+ while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+ dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+ /* Notify HW on the last WQE */
+ ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ ste_attr.direct_index = dep_wqe->direct_index;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ /* Fencing is done only on the first WQE */
+ ste_attr.send_attr.fence = 0;
+ }
+}
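+
+/* A hedged usage sketch (illustration only, not part of the driver) of
+ * the dependent-WQE flow above: rule code stages WQEs with
+ * mlx5hws_send_add_new_dep_wqe(), fills them in, and later flushes the
+ * whole batch. The field values shown are assumed placeholders.
+ *
+ *	struct mlx5hws_send_ring_dep_wqe *dep;
+ *
+ *	dep = mlx5hws_send_add_new_dep_wqe(queue);
+ *	dep->rule = rule;
+ *	dep->rtc_0 = rtc_0_id;
+ *	dep->user_data = user_data;
+ *	mlx5hws_send_all_dep_wqe(queue); // fence on first WQE, notify HW on last
+ */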
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+
+ ctrl.queue = queue;
+ /* Currently only one send ring is supported */
+ ctrl.send_ring = &queue->send_ring;
+ ctrl.num_wqebbs = 0;
+
+ return ctrl;
+}
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+ unsigned int idx;
+
+ idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+ /* Note that *buf points to a single MLX5_SEND_WQE_BB. It cannot be
+ * used as a buffer spanning more than one WQE_BB, since consecutive
+ * MLX5_SEND_WQE_BBs may reside on different kernel memory pages.
+ */
+ *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+ *len = MLX5_SEND_WQE_BB;
+
+ if (!ctrl->num_wqebbs) {
+ *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+ *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+ }
+
+ ctrl->num_wqebbs++;
+}
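+
+/* A hedged sketch (illustration only) of the posting protocol formed by
+ * post_start()/post_req_wqe()/post_end(): open a post, request WQEBBs
+ * one at a time, then seal the post with its attributes. hws_send_wqe()
+ * below is the in-tree user of this sequence.
+ *
+ *	struct mlx5hws_send_engine_post_ctrl ctrl;
+ *	size_t len;
+ *	char *buf;
+ *
+ *	ctrl = mlx5hws_send_engine_post_start(queue);
+ *	mlx5hws_send_engine_post_req_wqe(&ctrl, &buf, &len);
+ *	// fill buf; the first WQEBB already reserves the ctrl segment
+ *	mlx5hws_send_engine_post_end(&ctrl, &attr);
+ */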
+
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ struct mlx5hws_rule_match_tag *tag,
+ bool is_jumbo)
+{
+ if (is_jumbo) {
+ /* Clear previous possibly dirty control */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ } else {
+ /* Clear previous possibly dirty control and actions */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+ }
+}
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr)
+{
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_ring_sq *sq;
+ unsigned int idx;
+ u32 flags = 0;
+
+ sq = &ctrl->send_ring->send_sq;
+ idx = sq->cur_post & sq->buf_mask;
+ sq->last_idx = idx;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
+ wqe_ctrl->opmod_idx_opcode =
+ cpu_to_be32((attr->opmod << 24) |
+ ((sq->cur_post & 0xffff) << 8) |
+ attr->opcode);
+ wqe_ctrl->qpn_ds =
+ cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+ sq->sqn << 8);
+ wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+ flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+ wqe_ctrl->flags = cpu_to_be32(flags);
+
+ sq->wr_priv[idx].id = attr->id;
+ sq->wr_priv[idx].retry_id = attr->retry_id;
+
+ sq->wr_priv[idx].rule = attr->rule;
+ sq->wr_priv[idx].user_data = attr->user_data;
+ sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+ if (attr->rule) {
+ sq->wr_priv[idx].rule->pending_wqes++;
+ sq->wr_priv[idx].used_id = attr->used_id;
+ }
+
+ sq->cur_post += ctrl->num_wqebbs;
+
+ if (attr->notify_hw)
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_data,
+ void *send_wqe_tag,
+ bool is_jumbo,
+ u8 gta_opcode,
+ u32 direct_index)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+ memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+ sizeof(send_wqe_ctrl->stc_ix));
+
+ if (send_wqe_data)
+ memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+ else
+ hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+ mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
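+/* An STE may need to be written to two RTCs (e.g. RX and TX on FDB):
+ * the WQE is sent once per RTC, with HW notification requested only on
+ * the last send and fencing only on the first, mirroring the policy in
+ * mlx5hws_send_all_dep_wqe().
+ */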
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ u8 notify_hw = send_attr->notify_hw;
+ u8 fence = send_attr->fence;
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ send_attr->fence = fence;
+ send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ send_attr->fence = fence && !ste_attr->rtc_1;
+ send_attr->notify_hw = notify_hw;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ /* Restore to original requested values */
+ send_attr->notify_hw = notify_hw;
+ send_attr->fence = fence;
+}
+
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_send_ring_sq *send_sq;
+ unsigned int idx;
+ size_t wqe_len;
+ char *p;
+
+ send_attr.rule = priv->rule;
+ send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+ send_attr.notify_hw = 1;
+ send_attr.fence = 0;
+ send_attr.user_data = priv->user_data;
+ send_attr.id = priv->retry_id;
+ send_attr.used_id = priv->used_id;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ send_sq = &ctrl.send_ring->send_sq;
+ idx = wqe_cnt & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta ctrl */
+ memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+ idx = (wqe_cnt + 1) & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta data */
+ memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+ wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ enum mlx5hws_flow_op_status *status)
+{
+ switch (priv->rule->resize_info->state) {
+ case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ /* Backup original RTCs */
+ u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+ u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+ /* Delete partially failed move rule using resize_info */
+ priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+ priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+ /* Move rule to original RTC for future delete */
+ priv->rule->rtc_0 = orig_rtc_0;
+ priv->rule->rtc_1 = orig_rtc_1;
+ }
+ /* Clean leftovers */
+ mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+ break;
+
+ case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ priv->rule->matcher = priv->rule->matcher->resize_dst;
+ }
+ priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt,
+ enum mlx5hws_flow_op_status *status)
+{
+ priv->rule->pending_wqes--;
+
+ if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (priv->retry_id) {
+ hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+ return;
+ }
+ /* Some part of the rule failed */
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+ *priv->used_id = 0;
+ } else {
+ *priv->used_id = priv->id;
+ }
+
+ /* Update rule status for the last completion */
+ if (!priv->rule->pending_wqes) {
+ if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+ hws_send_engine_update_rule_resize(queue, priv, status);
+ return;
+ }
+
+ if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+ /* Rule completely failed and doesn't require cleanup */
+ if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ /* Advance the status; this only works on the good-flow path, as
+ * the enum is arranged that way:
+ * creating -> created -> deleting -> deleted
+ */
+ priv->rule->status++;
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ /* Rule was deleted now we can safely release action STEs
+ * and clear resize info
+ */
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+ mlx5hws_rule_free_action_ste(priv->rule);
+ mlx5hws_rule_clear_resize_info(priv->rule);
+ }
+ }
+ }
+}
+
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5hws_flow_op_result res[],
+ s64 *i,
+ u32 res_nb,
+ u16 wqe_cnt)
+{
+ enum mlx5hws_flow_op_status status;
+
+ if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+ likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+ status = MLX5HWS_FLOW_OP_SUCCESS;
+ } else {
+ status = MLX5HWS_FLOW_OP_ERROR;
+ }
+
+ if (priv->user_data) {
+ if (priv->rule) {
+ hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ /* Completion is provided on the last rule WQE */
+ if (priv->rule->pending_wqes)
+ return;
+ }
+
+ if (*i < res_nb) {
+ res[*i].user_data = priv->user_data;
+ res[*i].status = status;
+ (*i)++;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+ }
+ }
+}
+
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+ struct mlx5_cqe64 *cqe64)
+{
+ if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+ mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ return CQ_POLL_ERR;
+ }
+
+ return CQ_OK;
+}
+
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe64) {
+ if (unlikely(cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
+ return CQ_EMPTY;
+ }
+
+ mlx5_cqwq_pop(&cq->wq);
+ err = mlx5hws_parse_cqe(cq, cqe64);
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ return err;
+}
+
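+/* Completions are coalesced: a single CQE covers every WQE up to its
+ * wqe_counter, so WQEs the CQE skipped over completed successfully and
+ * are reported below with a NULL cqe.
+ */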
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+ struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+ struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+ struct mlx5hws_send_ring_priv *priv;
+ struct mlx5_cqe64 *cqe;
+ u8 cqe_opcode;
+ u16 wqe_cnt;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return;
+
+ cqe_opcode = get_cqe_opcode(cqe);
+ if (cqe_opcode == MLX5_CQE_INVALID)
+ return;
+
+ if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+ queue->err = true;
+
+ wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
+ while (cq->poll_wqe != wqe_cnt) {
+ priv = &sq->wr_priv[cq->poll_wqe];
+ hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+ cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+ }
+
+ priv = &sq->wr_priv[wqe_cnt];
+ cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+ hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+ mlx5hws_cq_poll_one(cq);
+}
+
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ while (comp->ci != comp->pi) {
+ if (*polled < res_nb) {
+ res[*polled].status =
+ comp->entries[comp->ci].status;
+ res[*polled].user_data =
+ comp->entries[comp->ci].user_data;
+ (*polled)++;
+ comp->ci = (comp->ci + 1) & comp->mask;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ return;
+ }
+ }
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ s64 polled = 0;
+
+ hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+ if (polled >= res_nb)
+ return polled;
+
+ hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+ return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
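+
+/* A hedged usage sketch (illustration only): a caller typically polls
+ * until its posted operations complete; ctx and queue_id are assumed to
+ * come from context/queue setup elsewhere.
+ *
+ *	struct mlx5hws_flow_op_result res[64];
+ *	int polled;
+ *
+ *	do {
+ *		polled = mlx5hws_send_queue_poll(ctx, queue_id, res, 64);
+ *	} while (!polled);
+ *	// res[i].user_data matches the user_data of the posted operation
+ */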
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ void *sqc_data)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ size_t buf_sz;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->mdev = mdev;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+ if (!sq->dep_wqe) {
+ err = -ENOMEM;
+ goto destroy_wq_cyc;
+ }
+
+ sq->wr_priv = kcalloc(buf_sz, sizeof(*sq->wr_priv), GFP_KERNEL);
+ if (!sq->wr_priv) {
+ err = -ENOMEM;
+ goto free_dep_wqe;
+ }
+
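+ /* num_entries is a power of two and MAX_WQES_PER_RULE is 32, so the
+ * ring size is a power of two and the mask below is valid.
+ */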
+ sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+ return 0;
+
+free_dep_wqe:
+ kfree(sq->dep_wqe);
+destroy_wq_cyc:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ if (!sq)
+ return;
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+}
+
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ int err;
+
+ err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ hws_send_ring_close_sq(sq);
+
+ return err;
+}
+
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ size_t buf_sz, sq_log_buf_sz;
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, ctx->pd_num);
+ MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+ err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+ if (err)
+ goto err_free_sqc;
+
+ err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+ queue, sq, cq);
+ if (err)
+ goto err_free_sq;
+
+ kvfree(sqc_data);
+
+ return 0;
+err_free_sq:
+ hws_send_ring_free_sq(sq);
+err_free_sqc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void hws_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion on CQ #%u\n", mcq->cqn);
+}
+
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ struct mlx5_cqe64 *cqe;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ mcq->comp = hws_cq_complete;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ int numa_node,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+ if (err)
+ goto err_out;
+
+ err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+ if (err)
+ goto err_free_cq;
+
+ kvfree(cqc_data);
+
+ return 0;
+
+err_free_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close_sq(&queue->send_ring.send_sq);
+ hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+ struct mlx5hws_send_ring *ring = &queue->send_ring;
+ int err;
+
+ err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+ &ring->send_cq);
+ if (err)
+ goto close_cq;
+
+ return err;
+
+close_cq:
+ hws_send_ring_close_cq(&ring->send_cq);
+ return err;
+}
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close(queue);
+ kfree(queue->completed.entries);
+}
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
+{
+ int err;
+
+ mutex_init(&queue->lock);
+
+ queue->num_entries = roundup_pow_of_two(queue_size);
+ queue->used_entries = 0;
+
+ queue->completed.entries = kcalloc(queue->num_entries,
+ sizeof(queue->completed.entries[0]),
+ GFP_KERNEL);
+ if (!queue->completed.entries)
+ return -ENOMEM;
+
+ queue->completed.pi = 0;
+ queue->completed.ci = 0;
+ queue->completed.mask = queue->num_entries - 1;
+ err = mlx5hws_send_ring_open(ctx, queue);
+ if (err)
+ goto free_completed_entries;
+
+ return 0;
+
+free_completed_entries:
+ kfree(queue->completed.entries);
+ return err;
+}
+
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+ while (queues--)
+ mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+ kfree(ctx->bwc_send_queue_locks);
+}
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+ hws_send_queues_bwc_locks_destroy(ctx);
+ __hws_send_queues_close(ctx, ctx->queues);
+ kfree(ctx->send_queue);
+}
+
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+ /* The number of BWC queues is equal to the number of regular HWS queues */
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return 0;
+
+ ctx->queues += bwc_queues;
+
+ ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_send_queue_locks),
+ GFP_KERNEL);
+
+ if (!ctx->bwc_send_queue_locks)
+ return -ENOMEM;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_init(&ctx->bwc_send_queue_locks[i]);
+
+ return 0;
+}
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size)
+{
+ int err = 0;
+ u32 i;
+
+ /* Open one extra queue for control path */
+ ctx->queues = queues + 1;
+
+ /* open a separate set of queues and locks for bwc API */
+ err = hws_bwc_send_queues_init(ctx);
+ if (err)
+ return err;
+
+ ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+ if (!ctx->send_queue) {
+ err = -ENOMEM;
+ goto free_bwc_locks;
+ }
+
+ for (i = 0; i < ctx->queues; i++) {
+ err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err)
+ goto close_send_queues;
+ }
+
+ return 0;
+
+close_send_queues:
+ __hws_send_queues_close(ctx, i);
+
+ kfree(ctx->send_queue);
+
+free_bwc_locks:
+ hws_send_queues_bwc_locks_destroy(ctx);
+
+ return err;
+}
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions)
+{
+ struct mlx5hws_send_ring_sq *send_sq;
+ struct mlx5hws_send_engine *queue;
+ bool wait_comp = false;
+ s64 polled = 0;
+
+ queue = &ctx->send_queue[queue_id];
+ send_sq = &queue->send_ring.send_sq;
+
+ switch (actions) {
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+ wait_comp = true;
+ fallthrough;
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+ if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+ /* Send dependent WQEs to drain the queue */
+ mlx5hws_send_all_dep_wqe(queue);
+ else
+ /* Signal on the last posted WQE */
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll queue until empty */
+ while (wait_comp && !mlx5hws_send_engine_empty(queue))
+ hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
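+
+/* A hedged usage sketch (illustration only): synchronously draining a
+ * queue before reconfiguration or teardown.
+ *
+ *	ret = mlx5hws_send_queue_action(ctx, queue_id,
+ *					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ */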
+
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+ u32 pd_num,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_match_data,
+ void *send_wqe_match_tag,
+ void *send_wqe_range_data,
+ void *send_wqe_range_tag,
+ bool is_jumbo,
+ u8 gta_opcode)
+{
+ bool has_range = send_wqe_range_data || send_wqe_range_tag;
+ bool has_match = send_wqe_match_data || send_wqe_match_tag;
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+ struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+ struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+ struct mlx5_cqe64 cqe;
+ u32 flags = 0;
+ int ret;
+
+ /* Set WQE control */
+ wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+ wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+ flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wqe_ctrl.flags = cpu_to_be32(flags);
+ wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+ /* Set GTA WQE CTRL */
+ memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+ gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+ /* Set GTA match WQE DATA */
+ if (has_match) {
+ if (send_wqe_match_data)
+ memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+ gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+ attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+ }
+
+ /* Set GTA range WQE DATA */
+ if (has_range) {
+ if (send_wqe_range_data)
+ memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+ gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+ attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+ }
+
+ attr.pdn = pd_num;
+ attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+ attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+ ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write WQE using command");
+ return ret;
+ }
+
+ if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+ (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+ *send_attr->used_id = send_attr->id;
+ return 0;
+ }
+
+ /* Retry if rule failed */
+ if (send_attr->retry_id) {
+ wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+ send_attr->id = send_attr->retry_id;
+ send_attr->retry_id = 0;
+ goto send_wqe;
+ }
+
+ return -1;
+}
+
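+/* Same role as mlx5hws_send_ste(), but the WQE is executed by FW via the
+ * GENERATE_WQE command rather than posted to the SQ. FW writes cannot be
+ * HW-fenced, so a fence request is honored by draining the queue first,
+ * and the completion is generated in SW.
+ */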
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ struct mlx5hws_rule *rule = send_attr->rule;
+ struct mlx5_core_dev *mdev;
+ u16 queue_id;
+ u32 pdn;
+ int ret;
+
+ queue_id = queue - ctx->send_queue;
+ mdev = ctx->mdev;
+ pdn = ctx->pd_num;
+
+ /* Writes that go through FW cannot be HW-fenced, so drain the queue */
+ if (send_attr->fence)
+ mlx5hws_send_queue_action(ctx,
+ queue_id,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ /* Advance the status; this only works on the good-flow path, as
+ * the enum is arranged that way:
+ * creating -> created -> deleting -> deleted
+ */
+ if (likely(rule))
+ rule->status++;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+ return;
+
+fail_rule:
+ if (likely(rule))
+ rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+ MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
new file mode 100644
index 000000000000..b50825d6dc53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs, which means a
+ * maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+ MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+ MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+ MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+ MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+ MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+ MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+ MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+ MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+ MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ __be32 flags;
+ __be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+ __be32 op_dirix;
+ __be32 stc_ix[5];
+ __be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+ __be32 rsvd0_ctr_id;
+ __be32 rsvd1_definer;
+ __be32 rsvd2[3];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+ __be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+ struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+ union {
+ struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+ struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+ };
+};
+
+struct mlx5hws_send_ring_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+ struct mlx5hws_rule *rule;
+ void *user_data;
+ u32 num_wqebbs;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+ struct mlx5hws_rule *rule;
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 direct_index;
+ void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+ struct mlx5_core_dev *mdev;
+ u16 cur_post;
+ u16 buf_mask;
+ struct mlx5hws_send_ring_priv *wr_priv;
+ unsigned int last_idx;
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
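+ /* Producer/consumer indices into dep_wqe; entries between tail and
+ * head are staged WQEs awaiting mlx5hws_send_all_dep_wqe().
+ */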
+ unsigned int head_dep_idx;
+ unsigned int tail_dep_idx;
+ u32 sqn;
+ struct mlx5_wq_cyc wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+ struct mlx5hws_send_ring_cq send_cq;
+ struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+ void *user_data;
+ enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+ struct mlx5hws_completed_poll_entry *entries;
+ u16 ci;
+ u16 pi;
+ u16 mask;
+};
+
+struct mlx5hws_send_engine {
+ struct mlx5hws_send_ring send_ring;
+ struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+ struct mlx5hws_completed_poll completed;
+ u16 used_entries;
+ u16 num_entries;
+ bool err;
+ struct mutex lock; /* Protects the send engine */
+};
+
+struct mlx5hws_send_engine_post_ctrl {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_send_ring *send_ring;
+ size_t num_wqebbs;
+};
+
+struct mlx5hws_send_engine_post_attr {
+ u8 opcode;
+ u8 opmod;
+ u8 notify_hw;
+ u8 fence;
+ u8 match_definer_id;
+ u8 range_definer_id;
+ size_t len;
+ struct mlx5hws_rule *rule;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+ void *user_data;
+};
+
+struct mlx5hws_send_ste_attr {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 *used_id_rtc_0;
+ u32 *used_id_rtc_1;
+ bool wqe_tag_is_jumbo;
+ u8 gta_opcode;
+ u32 direct_index;
+ struct mlx5hws_send_engine_post_attr send_attr;
+ struct mlx5hws_rule_match_tag *wqe_tag;
+ struct mlx5hws_rule_match_tag *range_wqe_tag;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr);
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
+ return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+ return queue->used_entries >= queue->num_entries;
+}
+
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries++;
+}
+
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries--;
+}
+
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+ void *user_data,
+ int comp_status)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ comp->entries[comp->pi].status = comp_status;
+ comp->entries[comp->pi].user_data = user_data;
+
+ comp->pi = (comp->pi + 1) & comp->mask;
+}
+
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+ return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
new file mode 100644
index 000000000000..9dbc3e9da5ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+ return tbl->ft_id;
+}
+
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ ft_attr->type = tbl->fw_ft_type;
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+ ft_attr->rtc_valid = true;
+}
+
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ /* Enabling reformat_en or decap_en for the first flow table
+ * must be done while all VFs are down.
+ * However, HWS has no way of knowing when the first FT is about to
+ * be created, and it doesn't use these FT capabilities at all anyway
+ * (the API doesn't even provide a way to specify these flags), so
+ * simply set these caps on all the flow tables.
+ * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint doesn't apply.
+ */
+ if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+ ft_attr->reformat_en = true;
+ ft_attr->decap_en = true;
+ }
+}
+
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_cmd_set_fte_dest dest = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return 0;
+
+ if (ctx->common_res[tbl_type].default_miss) {
+ ctx->common_res[tbl_type].default_miss->refcount++;
+ return 0;
+ }
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+ ft_attr.rtc_valid = false;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+
+ default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!default_miss) {
+ mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type);
+ return -EINVAL;
+ }
+
+ /* ctx->ctrl_lock must be held here */
+ ctx->common_res[tbl_type].default_miss = default_miss;
+ ctx->common_res[tbl_type].default_miss->refcount++;
+
+ return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ default_miss = ctx->common_res[tbl_type].default_miss;
+ if (--default_miss->refcount)
+ return;
+
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+ ctx->common_res[tbl_type].default_miss = NULL;
+}
+
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+ mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+ tbl->fw_ft_type,
+ tbl->type,
+ &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+ hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ /* Take/create ref over the default miss */
+ ret = hws_table_up_default_fdb_miss_tbl(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+ goto free_ft_obj;
+ }
+ ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+ goto down_miss_tbl;
+ }
+ }
+
+ return 0;
+
+down_miss_tbl:
+ hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+ return ret;
+}
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id)
+{
+ mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+ hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ ret = hws_table_init_check_hws_support(ctx, tbl);
+ if (ret)
+ return ret;
+
+ if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+ }
+
+ ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+ if (ret)
+ goto tbl_destroy;
+
+ INIT_LIST_HEAD(&tbl->matchers_list);
+ INIT_LIST_HEAD(&tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+tbl_destroy:
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+ mutex_lock(&tbl->ctx->ctrl_lock);
+ mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+ return NULL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise table\n");
+ goto free_tbl;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return tbl;
+
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (!list_empty(&tbl->matchers_list)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ if (!list_empty(&tbl->default_miss.head)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ list_del_init(&tbl->tbl_list_node);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ hws_table_uninit(tbl);
+ kfree(tbl);
+
+ return 0;
+
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_matcher *matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return tbl->ft_id;
+
+ matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+ return matcher->end_ft_id;
+}
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ /* Due to FW limitation, resetting the flow table to default action will
+ * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+ */
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+ return 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+ ft_attr.type = fw_ft_type;
+ ft_attr.rtc_id_0 = rtc_0_id;
+ ft_attr.rtc_id_1 = rtc_1_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+ ft_attr.type = fw_ft_type;
+ ft_attr.table_miss_id = next_ft_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_table *src_tbl;
+ int ret;
+
+ if (list_empty(&dst_tbl->default_miss.head))
+ return 0;
+
+ list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+ ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+ if (ret) {
+ mlx5hws_err(dst_tbl->ctx,
+ "Failed to update source miss table, unexpected behavior\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
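+/* Point src_tbl's last flow table at dst_tbl: either at dst_tbl's start
+ * anchor (if it has no matchers) or directly at its first matcher's
+ * RTCs. A NULL dst_tbl resets the miss behavior to the default.
+ */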
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_matcher *matcher;
+ u32 last_ft_id;
+ int ret;
+
+ last_ft_id = hws_table_get_last_ft(src_tbl);
+
+ if (dst_tbl) {
+ if (list_empty(&dst_tbl->matchers_list)) {
+ /* Connect src_tbl last_ft to dst_tbl start anchor */
+ ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ /* Connect src_tbl last_ft to first matcher RTC */
+ matcher = list_first_entry(&dst_tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret)
+ return ret;
+
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ src_tbl->default_miss.miss_tbl = dst_tbl;
+
+ return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+ mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((miss_tbl && miss_tbl->type != tbl->type)) {
+ mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_table *old_miss_tbl;
+ int ret;
+
+ ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+ if (ret)
+ goto out;
+
+ if (old_miss_tbl)
+ list_del_init(&tbl->default_miss.next);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ if (old_miss_tbl)
+ list_del_init(&old_miss_tbl->default_miss.head);
+
+ if (miss_tbl)
+ list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
new file mode 100644
index 000000000000..dd50420eec9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+ /* My miss table */
+ struct mlx5hws_table *miss_tbl;
+ struct list_head next;
+ /* Tables missing to my table */
+ struct list_head head;
+};
+
+struct mlx5hws_table {
+ struct mlx5hws_context *ctx;
+ u32 ft_id;
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+u32 mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+ u8 *ret_type)
+{
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return -EOPNOTSUPP;
+
+ *ret_type = FS_FT_FDB;
+
+ return 0;
+}
+
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+ bool is_mirror)
+{
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
new file mode 100644
index 000000000000..faf42421c43f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return 0;
+
+ xa_init(&ctx->vports.vport_gvmi_xa);
+
+ /* Set the GVMI for the eswitch manager and uplink vports only.
+ * The remaining vports (vport 0 of other functions, VFs and SFs)
+ * will be queried dynamically.
+ */
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+ if (ret)
+ return ret;
+
+ ctx->vports.uplink_gvmi = 0;
+ return 0;
+}
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+ if (ctx->caps->eswitch_manager)
+ xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+ u16 vport_gvmi;
+ int ret;
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+ if (ret)
+ return -EINVAL;
+
+ ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+ xa_mk_value(vport_gvmi), GFP_KERNEL);
+ if (ret)
+ mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+ return ret;
+}
+
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+ return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+ vport == MLX5_VPORT_PF;
+}
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+ void *entry;
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return -EINVAL;
+
+ if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+ *vport_gvmi = ctx->vports.esw_manager_gvmi;
+ return 0;
+ }
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ *vport_gvmi = ctx->vports.uplink_gvmi;
+ return 0;
+ }
+
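+ /* GVMIs are cached lazily: on a miss, query FW and insert, then
+ * reload. xa_insert() returning -EBUSY means another thread added
+ * the entry concurrently, in which case reloading is safe.
+ */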
+load_entry:
+ entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+ if (!xa_is_value(entry)) {
+ ret = hws_vport_add_gvmi(ctx, vport);
+ if (ret && ret != -EBUSY)
+ return ret;
+ goto load_entry;
+ }
+
+ *vport_gvmi = (u16)xa_to_value(entry);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
new file mode 100644
index 000000000000..0912fc166b3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index e87049dfd223..ef05ae8f5039 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -10,6 +10,56 @@
#define FBNIC_SN_STR_LEN 24
+static int fbnic_version_running_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, running_ver);
+ err = devlink_info_version_running_put(req, ver_name, running_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_running_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_version_stored_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char stored_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, stored_ver);
+ err = devlink_info_version_stored_put(req, ver_name, stored_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_stored_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int fbnic_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -17,6 +67,31 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
struct fbnic_dev *fbd = devlink_priv(devlink);
int err;
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI);
+ if (err)
+ return err;
+
if (fbd->dsn) {
unsigned char serial[FBNIC_SN_STR_LEN];
u8 dsn[8];
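
For context, a minimal sketch of the devlink pattern the two helpers above
feed into: the driver's .info_get callback reports "running" versions (the
firmware currently executing) and "stored" versions (the flashed image, which
takes effect after the next reset) under the same generic key, and the
"devlink dev info" command renders both trees. The version strings below are
placeholders:

	static int example_devlink_info_get(struct devlink *devlink,
					    struct devlink_info_req *req,
					    struct netlink_ext_ack *extack)
	{
		int err;

		/* firmware currently running on the device */
		err = devlink_info_version_running_put(req,
				DEVLINK_INFO_VERSION_GENERIC_FW,
				"24.0.1");	/* placeholder */
		if (err)
			return err;

		/* image in flash, used after the next reset */
		return devlink_info_version_stored_put(req,
				DEVLINK_INFO_VERSION_GENERIC_FW,
				"24.0.2");	/* placeholder */
	}
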
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index ce2435987fb6..2e3eb37a45cd 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,12 +46,13 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
+ select PHYLINK
help
- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
+ Support for the Microchip LAN743x and PCI11x1x families of PCI
+ Express Ethernet devices.
To compile this driver as a module, choose M here. The module will be
called lan743x.
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 0f1c0edec460..1a1cbd034eda 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1054,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 buf;
- int ret;
-
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
- ret = phy_ethtool_get_eee(phydev, eee);
- if (ret < 0)
- return ret;
+ eee->tx_lpi_timer = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
- buf = lan743x_csr_read(adapter, MAC_CR);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- eee->tx_lpi_timer = buf;
- } else {
- eee->tx_lpi_timer = 0;
- }
-
- return 0;
+ return phylink_ethtool_get_eee(adapter->phylink, eee);
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
- struct lan743x_adapter *adapter;
- struct phy_device *phydev;
- u32 buf = 0;
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 tx_lpi_timer;
- if (!netdev)
- return -EINVAL;
- adapter = netdev_priv(netdev);
- if (!adapter)
- return -EINVAL;
- phydev = netdev->phydev;
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
+ tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ if (tx_lpi_timer != eee->tx_lpi_timer) {
+ u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared.
+ * This function will trigger an autonegotiation restart and
+ * EEE will be re-enabled during link up if EEE was negotiated.
+ */
+ lan743x_mac_eee_enable(adapter, false);
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
+ eee->tx_lpi_timer);
- if (eee->eee_enabled) {
- buf = (u32)eee->tx_lpi_timer;
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+ if (mac_cr & MAC_CR_EEE_EN_)
+ lan743x_mac_eee_enable(adapter, true);
}
- return phy_ethtool_set_eee(phydev, eee);
+ return phylink_ethtool_set_eee(adapter->phylink, eee);
+}
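
From user space this path is exercised through the standard ethtool EEE
interface, for example (the device name is illustrative):

	ethtool --show-eee eth0
	ethtool --set-eee eth0 tx-timer 600

Note the guard above: the LPI request delay is rewritten only when it actually
changes, and EEE is toggled off around the write because the hardware requires
EEEEN to be clear while the field is updated.
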
+
+static int
+lan743x_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(adapter->phylink, cmd);
+}
+
+static int
+lan743x_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(adapter->phylink, cmd);
}
#ifdef CONFIG_PM
@@ -1120,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (netdev->phydev)
- phy_ethtool_get_wol(netdev->phydev, wol);
+ phylink_ethtool_get_wol(adapter->phylink, wol);
if (wol->supported != adapter->phy_wol_supported)
netif_warn(adapter, drv, adapter->netdev,
@@ -1162,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
!(adapter->phy_wol_supported & WAKE_MAGICSECURE))
phy_wol.wolopts &= ~WAKE_MAGIC;
- ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ ret = phylink_ethtool_set_wol(adapter->phylink, wol);
if (ret && (ret != -EOPNOTSUPP))
return ret;
@@ -1351,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct lan743x_phy *phy = &adapter->phy;
- if (phy->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
- if (phy->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
- pause->autoneg = phy->fc_autoneg;
+ phylink_ethtool_get_pauseparam(adapter->phylink, pause);
}
static int lan743x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- struct lan743x_phy *phy = &adapter->phy;
-
- if (!phydev)
- return -ENODEV;
-
- if (!phy_validate_pause(phydev, pause))
- return -EINVAL;
-
- phy->fc_request_control = 0;
- if (pause->rx_pause)
- phy->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- phy->fc_request_control |= FLOW_CTRL_TX;
-
- phy->fc_autoneg = pause->autoneg;
-
- if (pause->autoneg == AUTONEG_DISABLE)
- lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
- pause->rx_pause);
- else
- phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(adapter->phylink, pause);
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1413,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = lan743x_ethtool_get_link_ksettings,
+ .set_link_ksettings = lan743x_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
.get_pauseparam = lan743x_get_pauseparam,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e418539565b1..4dc5adcda6a3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
+#include <linux/phylink.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
@@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
return ret;
}
+static int lan743x_get_lsd(int speed, int duplex, u8 mss)
+{
+ int lsd;
+
+ switch (speed) {
+ case SPEED_2500:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_2500_SLAVE;
+ else
+ lsd = LINK_2500_MASTER;
+ break;
+ case SPEED_1000:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_1000_SLAVE;
+ else
+ lsd = LINK_1000_MASTER;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ lsd = -EINVAL;
+ }
+
+ return lsd;
+}
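
A hypothetical call site, to make the helper's contract explicit: a negative
return is an error code, anything else is a lan743x_sgmii_lsd enumerator.

	int lsd;

	lsd = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
			      MASTER_SLAVE_STATE_SLAVE);
	if (lsd < 0)
		return lsd;		/* unsupported speed: -EINVAL */
	adapter->sgmii_lsd = lsd;	/* here: LINK_1000_SLAVE */
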
+
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
u16 baud)
{
@@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
VR_MII_BAUD_RATE_1P25GBPS);
}
-static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
- bool *status)
-{
- int ret;
-
- ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
- VR_MII_GEN2_4_MPLL_CTRL1);
- if (ret < 0)
- return ret;
-
- if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
- ret == VR_MII_MPLL_MULTIPLIER_50)
- *status = true;
- else
- *status = false;
-
- return 0;
-}
-
-static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
int mii_ctrl;
@@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
return 0;
}
-static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
- enum lan743x_sgmii_lsd lsd = POWER_DOWN;
int mii_ctl;
- bool status;
int ret;
- switch (phydev->speed) {
- case SPEED_2500:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_2500_MASTER;
- else
- lsd = LINK_2500_SLAVE;
- break;
- case SPEED_1000:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_1000_MASTER;
- else
- lsd = LINK_1000_SLAVE;
- break;
- case SPEED_100:
- if (phydev->duplex)
- lsd = LINK_100FD;
- else
- lsd = LINK_100HD;
- break;
- case SPEED_10:
- if (phydev->duplex)
- lsd = LINK_10FD;
- else
- lsd = LINK_10HD;
- break;
- default:
- netif_err(adapter, drv, adapter->netdev,
- "Invalid speed %d\n", phydev->speed);
- return -EINVAL;
- }
-
- adapter->sgmii_lsd = lsd;
- ret = lan743x_sgmii_aneg_update(adapter);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII cfg failed\n", ret);
- return ret;
- }
-
- ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII get mode failed\n", ret);
- return ret;
- }
-
- if (status)
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 2.5G mode enable\n");
- else
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 1G mode enable\n");
-
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
if (mii_ctl < 0)
@@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
if (ret < 0)
return ret;
- ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
@@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u16 local_adv, u16 remote_adv)
-{
- struct lan743x_phy *phy = &adapter->phy;
- u8 cap;
-
- if (phy->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
- else
- cap = phy->fc_request_control;
-
- lan743x_mac_flow_ctrl_set_enables(adapter,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
return lan743x_phy_reset(adapter);
}
-static void lan743x_phy_link_status_change(struct net_device *netdev)
-{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 data;
-
- phy_print_status(phydev);
- if (phydev->state == PHY_RUNNING) {
- int remote_advertisement = 0;
- int local_advertisement = 0;
-
- data = lan743x_csr_read(adapter, MAC_CR);
-
- /* set duplex mode */
- if (phydev->duplex)
- data |= MAC_CR_DPX_;
- else
- data &= ~MAC_CR_DPX_;
-
- /* set bus speed */
- switch (phydev->speed) {
- case SPEED_10:
- data &= ~MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_100:
- data &= ~MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- case SPEED_1000:
- data |= MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_2500:
- data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- }
- lan743x_csr_write(adapter, MAC_CR, data);
-
- local_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->advertising);
- remote_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
-
- lan743x_phy_update_flowcontrol(adapter, local_advertisement,
- remote_advertisement);
- lan743x_ptp_update_latency(adapter, phydev->speed);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
- lan743x_sgmii_config(adapter);
-
- data = lan743x_csr_read(adapter, MAC_CR);
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, data);
- }
-}
-
-static void lan743x_phy_close(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
-
- phy_stop(netdev->phydev);
- phy_disconnect(netdev->phydev);
-
- /* using phydev here as phy_disconnect NULLs netdev->phydev */
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
-
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
adapter->phy_interface = PHY_INTERFACE_MODE_MII;
else
adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int lan743x_phy_open(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct lan743x_phy *phy = &adapter->phy;
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- };
- struct phy_device *phydev;
- int ret = -EIO;
-
- /* try devicetree phy, or fixed link */
- phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
- lan743x_phy_link_status_change);
-
- if (!phydev) {
- /* try internal phy */
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev) {
- if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
- ID_REV_ID_LAN7431_) {
- phydev = fixed_phy_register(PHY_POLL,
- &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return PTR_ERR(phydev);
- }
- } else {
- goto return_error;
- }
- }
-
- lan743x_phy_interface_select(adapter);
-
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- adapter->phy_interface);
- if (ret)
- goto return_error;
- }
-
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- phy_support_asym_pause(phydev);
- phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phy->fc_autoneg = phydev->autoneg;
-
- phy_start(phydev);
- phy_start_aneg(phydev);
- phy_attached_info(phydev);
- return 0;
-return_error:
- return ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "selected phy interface: 0x%X\n", adapter->phy_interface);
}
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
@@ -3061,6 +2870,336 @@ return_error:
return ret;
}
+static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from the External PHY via SGMII */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d sgmii aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 1000basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 2500basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+{
+ u32 mac_cr;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+ else
+ mac_cr &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+}
+
+static void lan743x_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ret = lan743x_phylink_2500basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "2500BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "2500BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = lan743x_phylink_1000basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "1000BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "1000BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = lan743x_phylink_sgmii_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "SGMII config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII mode selected and configured\n");
+ break;
+ default:
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII/GMII/MII(0x%X) mode enable\n",
+ state->interface);
+ break;
+ }
+}
+
+static void lan743x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ netif_tx_stop_all_queues(to_net_dev(config->dev));
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static void lan743x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int mac_cr;
+ u8 cap;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ /* Pre-initialize register bits.
+ * Resulting value corresponds to SPEED_10
+ */
+ mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
+ if (speed == SPEED_2500)
+ mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
+ else if (speed == SPEED_1000)
+ mac_cr |= MAC_CR_CFG_H_;
+ else if (speed == SPEED_100)
+ mac_cr |= MAC_CR_CFG_L_;
+
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+
+ lan743x_ptp_update_latency(adapter, speed);
+
+ /* Flow Control operation */
+ cap = 0;
+ if (tx_pause)
+ cap |= FLOW_CTRL_TX;
+ if (rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+
+ if (phydev)
+ lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
+
+ netif_tx_wake_all_queues(netdev);
+}
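
For quick reference, the MAC_CR speed encoding produced by the bit
manipulation above (derived directly from the code; both bits clear is the
pre-initialized SPEED_10 case):

	MAC_CR_CFG_H_  MAC_CR_CFG_L_  speed
	      0              0        10 Mbps
	      0              1        100 Mbps
	      1              0        1000 Mbps
	      1              1        2500 Mbps
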
+
+static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
+ .mac_config = lan743x_phylink_mac_config,
+ .mac_link_down = lan743x_phylink_mac_link_down,
+ .mac_link_up = lan743x_phylink_mac_link_up,
+};
+
+static int lan743x_phylink_create(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phylink *pl;
+
+ adapter->phylink_config.dev = &netdev->dev;
+ adapter->phylink_config.type = PHYLINK_NETDEV;
+ adapter->phylink_config.mac_managed_pm = false;
+
+ adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ lan743x_phy_interface_select(adapter);
+
+ switch (adapter->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ adapter->phylink_config.supported_interfaces);
+ adapter->phylink_config.mac_capabilities |= MAC_2500FD;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ default:
+ phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
+ }
+
+ pl = phylink_create(&adapter->phylink_config, NULL,
+ adapter->phy_interface, &lan743x_phylink_mac_ops);
+
+ if (IS_ERR(pl)) {
+ netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
+ return PTR_ERR(pl);
+ }
+
+ adapter->phylink = pl;
+ netdev_dbg(netdev, "lan743x phylink created\n");
+
+ return 0;
+}
+
+static bool lan743x_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
+static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
+{
+ struct device_node *dn = adapter->pdev->dev.of_node;
+ struct net_device *dev = adapter->netdev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (dn)
+ ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
+
+ if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (phydev) {
+ /* attach the mac to the phy */
+ ret = phylink_connect_phy(adapter->phylink, phydev);
+ } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
+ struct phylink_link_state state;
+ unsigned long caps;
+
+ caps = adapter->phylink_config.mac_capabilities;
+ if (caps & MAC_2500FD) {
+ state.speed = SPEED_2500;
+ state.duplex = DUPLEX_FULL;
+ } else if (caps & MAC_1000FD) {
+ state.speed = SPEED_1000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ state.speed = SPEED_UNKNOWN;
+ state.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ret = phylink_set_fixed_link(adapter->phylink, &state);
+ if (ret) {
+ netdev_err(dev, "Could not set fixed link\n");
+ return ret;
+ }
+ } else {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
+
+ phylink_start(adapter->phylink);
+
+ return 0;
+}
+
+static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
+{
+ phylink_stop(adapter->phylink);
+ phylink_disconnect_phy(adapter->phylink);
+}
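
Taken together, the driver now follows the canonical phylink lifecycle; a
condensed sketch of the call order (all calls are real phylink API, error
handling elided):

	/* probe */
	pl = phylink_create(&config, fwnode, phy_mode, &mac_ops);

	/* ndo_open */
	phylink_of_phy_connect(pl, of_node, 0); /* or phylink_connect_phy(),
						 * or phylink_set_fixed_link()
						 */
	phylink_start(pl);

	/* ndo_stop */
	phylink_stop(pl);
	phylink_disconnect_phy(pl);

	/* remove */
	phylink_destroy(pl);
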
+
static int lan743x_netdev_close(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev)
lan743x_ptp_close(adapter);
- lan743x_phy_close(adapter);
+ lan743x_phylink_disconnect(adapter);
lan743x_mac_close(adapter);
@@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_intr;
- ret = lan743x_phy_open(adapter);
+ ret = lan743x_phylink_connect(adapter);
if (ret)
goto close_mac;
ret = lan743x_ptp_open(adapter);
if (ret)
- goto close_phy;
+ goto close_mac;
lan743x_rfe_open(adapter);
@@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_tx;
}
+ if (netdev->phydev)
+ phy_support_eee(netdev->phydev);
+
#ifdef CONFIG_PM
if (adapter->netdev->phydev) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -3143,9 +3285,8 @@ close_rx:
lan743x_rx_close(&adapter->rx[index]);
}
lan743x_ptp_close(adapter);
-
-close_phy:
- lan743x_phy_close(adapter);
+ if (adapter->phylink)
+ lan743x_phylink_disconnect(adapter);
close_mac:
lan743x_mac_close(adapter);
@@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
static int lan743x_netdev_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
if (!netif_running(netdev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return lan743x_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+
+ return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
static void lan743x_netdev_set_multicast(struct net_device *netdev)
@@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
mdiobus_unregister(adapter->mdiobus);
}
+static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
+{
+ phylink_destroy(adapter->phylink);
+ adapter->phylink = NULL;
+}
+
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
unregister_netdev(adapter->netdev);
+ lan743x_destroy_phylink(adapter);
lan743x_mdiobus_cleanup(adapter);
lan743x_hardware_cleanup(adapter);
lan743x_pci_cleanup(adapter);
@@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
+ ret = lan743x_phylink_create(adapter);
+ if (ret < 0) {
+ netif_err(adapter, probe, netdev,
+ "failed to setup phylink (%d)\n", ret);
+ goto cleanup_mdiobus;
+ }
ret = register_netdev(adapter->netdev);
if (ret < 0)
- goto cleanup_mdiobus;
+ goto cleanup_phylink;
return 0;
+cleanup_phylink:
+ lan743x_destroy_phylink(adapter);
+
cleanup_mdiobus:
lan743x_mdiobus_cleanup(adapter);
@@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev)
MAC_WK_SRC_WK_FR_SAVED_;
lan743x_csr_write(adapter, MAC_WK_SRC, data);
+ rtnl_lock();
/* open netdev when netdev is at running state while resume.
 * For instance, it is true when system wakes up after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b2585a384e2..8ef897c114d3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -5,6 +5,7 @@
#define _LAN743X_H
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -1083,6 +1084,8 @@ struct lan743x_adapter {
u32 flags;
u32 hw_cfg;
phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index f9ebffc04eb8..f663b6e12466 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -8,6 +8,7 @@ config LAN966X_SWITCH
select PHYLINK
select PAGE_POOL
select VCAP
+ select FDMA
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 3b6ac331691d..4cdbe263502c 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 3960534ac2ad..502670718104 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -6,31 +6,55 @@
#include "lan966x_main.h"
-static int lan966x_fdma_channel_active(struct lan966x *lan966x)
-{
- return lan_rd(lan966x, FDMA_CH_ACTIVE);
-}
-
-static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
- struct lan966x_db *db)
+static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+ struct lan966x_rx *rx = &lan966x->rx;
struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
- return NULL;
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+ *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
- db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
- return page;
+ return 0;
+}
+
+static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
int i, j;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ for (j = 0; j < fdma->n_dbs; ++j)
page_pool_put_full_page(rx->page_pool,
rx->page[i][j], false);
}
@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
struct page *page;
- page = rx->page[rx->dcb_index][rx->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return;
page_pool_recycle_direct(rx->page_pool, page);
}
-static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
- struct lan966x_rx_dcb *dcb,
- u64 nextptr)
-{
- struct lan966x_db *db;
- int i;
-
- for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
- db = &dcb->db[i];
- db->status = FDMA_DCB_STATUS_INTR;
- }
-
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
-
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
-}
-
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = {
.order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = FDMA_DCB_MAX,
+ .pool_size = rx->fdma.n_dcbs,
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_rx_dcb *dcb;
- struct lan966x_db *db;
- struct page *page;
- int i, j;
- int size;
+ struct fdma *fdma = &rx->fdma;
+ int err;
if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool);
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
-
- rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
- if (!rx->dcbs)
- return -ENOMEM;
-
- rx->last_entry = rx->dcbs;
- rx->db_index = 0;
- rx->dcb_index = 0;
-
- /* Now for each dcb allocate the dbs */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &rx->dcbs[i];
- dcb->info = 0;
-
- /* For each db allocate a page and map it to the DB dataptr. */
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (!page)
- return -ENOMEM;
-
- db->status = 0;
- rx->page[i][j] = page;
- }
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
+ return err;
- lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
- }
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
return 0;
}
-static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
-{
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
-}
-
-static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
-{
- struct lan966x *lan966x = rx->lan966x;
- u32 size;
-
- /* Now it is possible to do the cleanup of dcb */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
-}
-
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 mask;
 /* When activating a channel, it is required to first write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP(rx->channel_id));
- lan_wr(upper_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP1(rx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(rx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(rx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(rx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
}
@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
-static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
- struct lan966x_tx_dcb *dcb)
-{
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = 0;
-}
-
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
- struct lan966x_db *db;
- int size;
- int i, j;
+ struct fdma *fdma = &tx->fdma;
+ int err;
- tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
GFP_KERNEL);
if (!tx->dcbs_buf)
return -ENOMEM;
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
- if (!tx->dcbs)
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
goto out;
- /* Now for each dcb allocate the db */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &tx->dcbs[i];
-
- for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- db->dataptr = 0;
- db->status = 0;
- }
-
- lan966x_fdma_tx_add_dcb(tx, dcb);
- }
+ fdma_dcbs_init(fdma, 0, 0);
return 0;
@@ -280,33 +221,30 @@ out:
static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- int size;
kfree(tx->dcbs_buf);
-
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+ fdma_free_coherent(lan966x->dev, &tx->fdma);
}
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 mask;
 /* When activating a channel, it is required to first write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP(tx->channel_id));
- lan_wr(upper_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP1(tx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(tx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(tx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(tx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
tx->activated = false;
- tx->last_in_use = -1;
}
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
struct lan966x *lan966x = tx->lan966x;
/* Write the registers to reload the channel */
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_rx *rx = &lan966x->rx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
struct xdp_frame_bulk bq;
- struct lan966x_db *db;
unsigned long flags;
bool clear = false;
+ struct fdma_db *db;
int i;
xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags);
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used)
continue;
- db = &tx->dcbs[i].db[0];
- if (!(db->status & FDMA_DCB_STATUS_DONE))
+ db = fdma_db_get(fdma, i, 0);
+ if (!fdma_db_is_done(db))
continue;
dcb_buf->dev->stats.tx_packets++;
@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
-static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
-{
- struct lan966x_db *db;
-
- /* Check if there is any data */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
- return false;
-
- return true;
-}
-
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
struct lan966x_port *port;
- struct lan966x_db *db;
+ struct fdma_db *db;
struct page *page;
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return FDMA_ERROR;
@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
u64 src_port)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_db *db;
+ struct fdma *fdma = &rx->fdma;
struct sk_buff *skb;
+ struct fdma_db *db;
struct page *page;
u64 timestamp;
/* Get the received frame and unmap it */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
- skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ skb = build_skb(page_address(page), fdma->db_size);
if (unlikely(!skb))
goto free_page;
@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
struct lan966x_rx *rx = &lan966x->rx;
- int dcb_reload = rx->dcb_index;
- struct lan966x_rx_dcb *old_dcb;
- struct lan966x_db *db;
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
bool redirect = false;
struct sk_buff *skb;
- struct page *page;
- int counter = 0;
u64 src_port;
- u64 nextptr;
+
+ dcb_reload = fdma->dcb_index;
lan966x_fdma_tx_clear_buf(lan966x, weight);
/* Get all received skb */
while (counter < weight) {
- if (!lan966x_fdma_rx_more_frames(rx))
+ if (!fdma_has_frames(fdma))
break;
counter++;
@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
break;
case FDMA_ERROR:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
goto allocate_new;
case FDMA_REDIRECT:
redirect = true;
fallthrough;
case FDMA_TX:
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
}
skb = lan966x_fdma_rx_get_frame(rx, src_port);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
if (!skb)
goto allocate_new;
@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
allocate_new:
/* Allocate new pages and map them */
- while (dcb_reload != rx->dcb_index) {
- db = &rx->dcbs[dcb_reload].db[rx->db_index];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (unlikely(!page))
- break;
- rx->page[dcb_reload][rx->db_index] = page;
-
- old_dcb = &rx->dcbs[dcb_reload];
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
dcb_reload++;
- dcb_reload &= FDMA_DCB_MAX - 1;
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
- nextptr = rx->dma + ((unsigned long)old_dcb -
- (unsigned long)rx->dcbs);
- lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
lan966x_fdma_rx_reload(rx);
}
@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
int i;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
- if (!dcb_buf->used && i != tx->last_in_use)
+ if (!dcb_buf->used &&
+ !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
return i;
}
return -1;
}
-static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
- int next_to_use, int len,
- dma_addr_t dma_addr)
-{
- struct lan966x_tx_dcb *next_dcb;
- struct lan966x_db *next_db;
-
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(len);
-}
-
-static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
lan966x_fdma_tx_reload(tx);
} else {
 /* Because it is the first time, just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
}
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- xdpf->len + IFH_LEN_BYTES,
- dma_addr);
} else {
page = ptr;
@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.page = page;
next_dcb_buf->len = len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- len + IFH_LEN_BYTES,
- dma_addr + XDP_PACKET_HEADROOM);
}
/* Fill up the buffer */
@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
+ __fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
+ &fdma_nextptr_cb,
+ &lan966x_fdma_xdp_tx_dataptr_cb);
+
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
out:
spin_unlock(&lan966x->tx_lock);
@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
goto release;
}
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
-
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = true;
@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = dev;
+ fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len));
+
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
return NETDEV_TX_OK;
@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
struct page_pool *page_pool;
- dma_addr_t rx_dma;
- void *rx_dcbs;
- u32 size;
+ struct fdma fdma_rx_old;
int err;
/* Store these for later to free them */
- rx_dma = lan966x->rx.dma;
- rx_dcbs = lan966x->rx.dcbs;
+ memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi);
@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
goto restore;
lan966x_fdma_rx_start(&lan966x->rx);
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+ fdma_free_coherent(lan966x->dev, &fdma_rx_old);
page_pool_destroy(page_pool);
@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err;
restore:
lan966x->rx.page_pool = page_pool;
- lan966x->rx.dma = rx_dma;
- lan966x->rx.dcbs = rx_dcbs;
+ memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
lan966x_fdma_rx_start(&lan966x->rx);
return err;
@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x)
return 0;
lan966x->rx.lan966x = lan966x;
- lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
+ lan966x->rx.fdma.priv = lan966x;
+ lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
+ lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x;
- lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
- lan966x->tx.last_in_use = -1;
+ lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
+ lan966x->tx.fdma.priv = lan966x;
+ lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
+ lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err)
@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
err = lan966x_fdma_tx_alloc(&lan966x->tx);
if (err) {
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
return err;
}
@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
napi_disable(&lan966x->napi);
lan966x_fdma_rx_free_pages(&lan966x->rx);
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx);
}
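
A condensed sketch of the shared-library flow the conversion above lands on:
the driver fills a struct fdma with its ring geometry and two callbacks, the
library allocates the whole DCB/DB ring in one coherent allocation, and the
descriptors are then seeded. All names below appear in this patch; only the
wrapper function is hypothetical:

	static int example_rx_ring_setup(struct lan966x *lan966x)
	{
		struct fdma *fdma = &lan966x->rx.fdma;
		int err;

		fdma->channel_id = FDMA_XTR_CHANNEL;
		fdma->n_dcbs = FDMA_DCB_MAX;
		fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
		fdma->priv = lan966x;
		fdma->db_size = PAGE_SIZE << lan966x->rx.page_order;
		fdma->size = fdma_get_size(fdma); /* needs n_dcbs/n_dbs set */
		fdma->ops.nextptr_cb = &fdma_nextptr_cb; /* library default */
		fdma->ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;

		/* one coherent allocation for the whole DCB/DB ring */
		err = fdma_alloc_coherent(lan966x->dev, fdma);
		if (err)
			return err;

		/* init every DCB/DB; dataptr_cb supplies each DMA address */
		fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
			       FDMA_DCB_STATUS_INTR);
		return 0;
	}
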
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index f8bebbcf77b2..25cb2f61986f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -16,6 +16,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <fdma_api.h>
#include <vcap_api.h>
#include <vcap_api_client.h>
@@ -76,15 +77,6 @@
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
@@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt {
struct lan966x_port;
-struct lan966x_db {
- u64 dataptr;
- u64 status;
-};
-
-struct lan966x_rx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct lan966x_tx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
-};
-
struct lan966x_rx {
struct lan966x *lan966x;
- /* Pointer to the array of hardware dcbs. */
- struct lan966x_rx_dcb *dcbs;
-
- /* Pointer to the last address in the dcbs. */
- struct lan966x_rx_dcb *last_entry;
+ struct fdma fdma;
/* For each DB, there is a page */
struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- /* Represents the db_index, it can have a value between 0 and
- * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS
- * it means that the DCB can be reused.
- */
- int db_index;
-
- /* Represents the index in the dcbs. It has a value between 0 and
- * FDMA_DCB_MAX
- */
- int dcb_index;
-
- /* Represents the dma address to the dcbs array */
- dma_addr_t dma;
-
/* Represents the page order that is used to allocate the pages for the
* RX buffers. This value is calculated based on max MTU of the devices.
*/
@@ -252,8 +209,6 @@ struct lan966x_rx {
*/
u32 max_mtu;
- u8 channel_id;
-
struct page_pool *page_pool;
};
@@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf {
struct lan966x_tx {
struct lan966x *lan966x;
- /* Pointer to the dcb list */
- struct lan966x_tx_dcb *dcbs;
- u16 last_in_use;
-
- /* Represents the DMA address to the first entry of the dcb entries. */
- dma_addr_t dma;
+ struct fdma fdma;
/* Array of dcbs that are given to the HW */
struct lan966x_tx_dcb_buf *dcbs_buf;
- u8 channel_id;
-
bool activated;
};
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index b3c28260adf8..e172638b0601 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -582,17 +582,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct kernel_ethtool_ts_info *info)
{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (ocelot->ptp_clock) {
+ info->phc_index = ptp_clock_index(ocelot->ptp_clock);
+ } else {
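+		/* the ethtool core now reports the software defaults and phc_index = -1 */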
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 3f7519e435b8..01fe76786f77 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -23,6 +23,7 @@ config IONIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select DIMLIB
+ select PAGE_POOL
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f2f07bf88545..c8c710cfe70c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -181,10 +181,7 @@ struct ionic_queue;
struct ionic_qcq;
#define IONIC_MAX_BUF_LEN ((u16)-1)
-#define IONIC_PAGE_SIZE PAGE_SIZE
-#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
-#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
- __GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)
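+/* cap buffers at IONIC_MAX_BUF_LEN: descriptor len fields are u16 */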
#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
(VLAN_ETH_HLEN + \
@@ -238,9 +235,8 @@ struct ionic_queue {
unsigned int index;
unsigned int num_descs;
unsigned int max_sg_elems;
+
u64 features;
- unsigned int type;
- unsigned int hw_index;
unsigned int hw_type;
bool xdp_flush;
union {
@@ -250,18 +246,23 @@ struct ionic_queue {
struct ionic_admin_cmd *adminq;
};
union {
- void __iomem *cmb_base;
- struct ionic_txq_desc __iomem *cmb_txq;
- struct ionic_rxq_desc __iomem *cmb_rxq;
- };
- union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
struct xdp_rxq_info *xdp_rxq_info;
+ struct bpf_prog *xdp_prog;
+ struct page_pool *page_pool;
struct ionic_queue *partner;
+
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
+ unsigned int type;
+ unsigned int hw_index;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 86774d9922d8..40496587b2b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -46,8 +47,9 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
-static int ionic_xdp_queues_config(struct ionic_lif *lif);
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
+static void ionic_unregister_rxq_info(struct ionic_queue *q);
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
static void ionic_dim_work(struct work_struct *work)
{
@@ -380,6 +382,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
+ ionic_unregister_rxq_info(&qcq->q);
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -437,9 +440,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
- ionic_xdp_unregister_rxq_info(&qcq->q);
- ionic_qcq_intr_free(lif, qcq);
+ page_pool_destroy(qcq->q.page_pool);
+ qcq->q.page_pool = NULL;
+ ionic_qcq_intr_free(lif, qcq);
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
unsigned int desc_info_size,
- unsigned int pid, struct ionic_qcq **qcq)
+ unsigned int pid, struct bpf_prog *xdp_prog,
+ struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
goto err_out_free_qcq;
}
+ if (type == IONIC_QTYPE_RXQ) {
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = num_descs,
+ .nid = NUMA_NO_NODE,
+ .dev = lif->ionic->dev,
+ .napi = &new->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .netdev = lif->netdev,
+ };
+
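+		/* XDP_TX sends from RX buffers, so the pool must map them both ways */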
+ if (xdp_prog)
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ new->q.page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(new->q.page_pool)) {
+ netdev_err(lif->netdev, "Cannot create page_pool\n");
+ err = PTR_ERR(new->q.page_pool);
+ new->q.page_pool = NULL;
+ goto err_out_free_q_info;
+ }
+ }
+
new->q.type = type;
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
@@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
}
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
@@ -712,6 +742,8 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
+err_out_free_page_pool:
+ page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
vfree(new->q.info);
err_out_free_qcq:
@@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(struct ionic_admin_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->adminqcq);
+ lif->kern_pid, NULL, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->notifyqcq);
+ lif->kern_pid, NULL, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -925,6 +957,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ err = ionic_register_rxq_info(q, qcq->napi.napi_id);
+ if (err) {
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -960,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &txq);
+ lif->kern_pid, NULL, &txq);
if (err)
goto err_qcq_alloc;
@@ -1020,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rxq);
+ lif->kern_pid, NULL, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1037,7 +1074,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
goto err_qcq_init;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- ionic_rx_fill(&rxq->q);
+ ionic_rx_fill(&rxq->q, NULL);
err = ionic_qcq_enable(rxq);
if (err)
goto err_qcq_enable;
@@ -2046,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2078,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, lif->xdp_prog,
+ &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2143,9 +2181,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
- err = ionic_xdp_queues_config(lif);
- if (err)
- return err;
+ ionic_xdp_rxqs_prog_update(lif);
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
@@ -2154,7 +2190,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
goto err_out;
}
- ionic_rx_fill(&lif->rxqcqs[i]->q);
+ ionic_rx_fill(&lif->rxqcqs[i]->q,
+ READ_ONCE(lif->rxqcqs[i]->q.xdp_prog));
err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2167,7 +2204,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
}
if (lif->hwstamp_rxq) {
- ionic_rx_fill(&lif->hwstamp_rxq->q);
+ ionic_rx_fill(&lif->hwstamp_rxq->q, NULL);
err = ionic_qcq_enable(lif->hwstamp_rxq);
if (err)
goto err_out_hwstamp_rx;
@@ -2192,7 +2229,7 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
- ionic_xdp_queues_config(lif);
+ ionic_xdp_rxqs_prog_update(lif);
return err;
}
@@ -2651,7 +2688,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+static void ionic_unregister_rxq_info(struct ionic_queue *q)
{
struct xdp_rxq_info *xi;
@@ -2665,7 +2702,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
kfree(xi);
}
-static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
{
struct xdp_rxq_info *rxq_info;
int err;
@@ -2676,15 +2713,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_
err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
goto err_out;
}
- err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
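+	/* let the XDP core return redirected buffers to the page_pool */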
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
xdp_rxq_info_unreg(rxq_info);
goto err_out;
}
@@ -2698,44 +2735,20 @@ err_out:
return err;
}
-static int ionic_xdp_queues_config(struct ionic_lif *lif)
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif)
{
+ struct bpf_prog *xdp_prog;
unsigned int i;
- int err;
if (!lif->rxqcqs)
- return 0;
-
- /* There's no need to rework memory if not going to/from NULL program.
- * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
- * This way we don't need to keep an *xdp_prog in every queue struct.
- */
- if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
- return 0;
+ return;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
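+	/* push the current prog into each RX queue for use in the napi path */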
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
struct ionic_queue *q = &lif->rxqcqs[i]->q;
- if (q->xdp_rxq_info) {
- ionic_xdp_unregister_rxq_info(q);
- continue;
- }
-
- err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
- if (err) {
- dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
- i, err);
- goto err_out;
- }
+ WRITE_ONCE(q->xdp_prog, xdp_prog);
}
-
- return 0;
-
-err_out:
- for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
- ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
-
- return err;
}
static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
@@ -2765,11 +2778,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
if (!netif_running(netdev)) {
old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else if (lif->xdp_prog && bpf->prog) {
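+		/* swapping one prog for another needs no queue rebuild */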
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_xdp_rxqs_prog_update(lif);
} else {
+ struct ionic_queue_params qparams;
+
+ ionic_init_queue_params(lif, &qparams);
+ qparams.xdp_prog = bpf->prog;
mutex_lock(&lif->queue_lock);
- ionic_stop_queues_reconfig(lif);
+ ionic_reconfigure_queues(lif, &qparams);
old_prog = xchg(&lif->xdp_prog, bpf->prog);
- ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
}
@@ -2871,13 +2890,23 @@ err_out:
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
- /* only swapping the queues, not the napi, flags, or other stuff */
+ /* only swapping the queues and napi, not flags or other stuff */
+ swap(a->napi, b->napi);
+
+ if (a->q.type == IONIC_QTYPE_RXQ) {
+ swap(a->q.page_pool, b->q.page_pool);
+ a->q.page_pool->p.napi = &a->napi;
+ if (b->q.page_pool) /* is NULL when increasing queue count */
+ b->q.page_pool->p.napi = &b->napi;
+ }
+
swap(a->q.features, b->q.features);
swap(a->q.num_descs, b->q.num_descs);
swap(a->q.desc_size, b->q.desc_size);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_prog, b->q.xdp_prog);
swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
@@ -2928,7 +2957,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
- qparam->rxq_features != lif->rxq_features) {
+ qparam->rxq_features != lif->rxq_features ||
+ qparam->xdp_prog != lif->xdp_prog) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs) {
@@ -2959,7 +2989,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
}
@@ -2968,7 +2998,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &tx_qcqs[i]);
+ lif->kern_pid, NULL, &tx_qcqs[i]);
if (err)
goto err_out;
}
@@ -2990,7 +3020,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, NULL, &lif->rxqcqs[i]);
if (err)
goto err_out;
}
@@ -2999,11 +3029,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rx_qcqs[i]);
+ lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]);
if (err)
goto err_out;
rx_qcqs[i]->q.features = qparam->rxq_features;
+ rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog;
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 3e1005293c4a..e01756fb7fdd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -268,6 +268,7 @@ struct ionic_queue_params {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u64 rxq_features;
+ struct bpf_prog *xdp_prog;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
@@ -280,6 +281,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->rxq_features = lif->rxq_features;
+ qparam->xdp_prog = lif->xdp_prog;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index fc79baad4561..0eeda7e502db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_lif.h"
@@ -118,108 +119,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
- return buf_info->dma_addr + buf_info->page_offset;
+ return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
}
-static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+static void __ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ bool recycle_direct)
{
- return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
-}
-
-static int ionic_rx_page_alloc(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
- dma_addr_t dma_addr;
- struct page *page;
-
- page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: %s page alloc failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->alloc_err++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
- __free_pages(page, 0);
- net_err_ratelimited("%s: %s dma map failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->dma_map_err++;
- return -EIO;
- }
-
- buf_info->dma_addr = dma_addr;
- buf_info->page = page;
- buf_info->page_offset = 0;
-
- return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in free\n",
- dev_name(dev), q->name);
- return;
- }
-
if (!buf_info->page)
return;
- dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(buf_info->page, 0);
+ page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
-static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 len)
-{
- u32 size;
-
- /* don't re-use pages allocated in low-mem condition */
- if (page_is_pfmemalloc(buf_info->page))
- return false;
-
- /* don't re-use buffers from non-local numa nodes */
- if (page_to_nid(buf_info->page) != numa_mem_id())
- return false;
-
- size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
- buf_info->page_offset += size;
- if (buf_info->page_offset >= IONIC_PAGE_SIZE)
- return false;
- get_page(buf_info->page);
+static void ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, false);
+}
- return true;
+static void ionic_rx_put_buf_direct(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, true);
}
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
struct sk_buff *skb,
struct ionic_buf_info *buf_info,
- u32 off, u32 len,
+ u32 headroom, u32 len,
bool synced)
{
if (!synced)
- dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
- off, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset + off,
- len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ len, buf_info->len);
- if (!ionic_rx_buf_recycle(q, buf_info, len)) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ /* napi_gro_frags() will release/recycle the
+ * page_pool buffers from the frags list
+ */
+ buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
@@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
+ skb_mark_for_recycle(skb);
if (headroom)
frag_len = min_t(u16, len,
IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
else
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
@@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
for (i = 0; i < num_sg_elems; i++, buf_info++) {
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, buf_info->len);
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
len -= frag_len;
}
@@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
struct ionic_rx_desc_info *desc_info,
unsigned int headroom,
unsigned int len,
+ unsigned int num_sg_elems,
bool synced)
{
struct ionic_buf_info *buf_info;
struct device *dev = q->dev;
struct sk_buff *skb;
+ int i;
buf_info = &desc_info->bufs[0];
@@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
-
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
+ skb_mark_for_recycle(skb);
if (!synced)
- dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
+
skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
- dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, netdev);
+ /* recycle the Rx buffer now that we're done with it */
+ ionic_rx_put_buf_direct(q, buf_info);
+ buf_info++;
+ for (i = 0; i < num_sg_elems; i++, buf_info++)
+ ionic_rx_put_buf_direct(q, buf_info);
+
return skb;
}
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info,
+ bool in_napi)
{
- unsigned int nbufs = desc_info->nbufs;
- struct ionic_buf_info *buf_info;
- struct device *dev = q->dev;
- int i;
+ struct xdp_frame_bulk bq;
- if (!nbufs)
+ if (!desc_info->nbufs)
return;
- buf_info = desc_info->bufs;
- dma_unmap_single(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
- buf_info++;
- for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
- dma_unmap_page(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ if (desc_info->act == XDP_TX) {
+ if (likely(in_napi))
+ xdp_return_frame_rx_napi(desc_info->xdpf);
+ else
+ xdp_return_frame(desc_info->xdpf);
+ } else if (desc_info->act == XDP_REDIRECT) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ xdp_return_frame_bulk(desc_info->xdpf, &bq);
}
- if (desc_info->act == XDP_REDIRECT)
- xdp_return_frame(desc_info->xdpf);
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
desc_info->nbufs = 0;
desc_info->xdpf = NULL;
@@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
buf_info = desc_info->bufs;
stats = q_to_tx_stats(q);
- dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
- return -EIO;
+ if (act == XDP_TX) {
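+		/* RX buffer is already page_pool-mapped; just sync it for the device */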
+ dma_addr = page_pool_get_dma_addr(page) +
+ off + XDP_PACKET_HEADROOM;
+ dma_sync_single_for_device(q->dev, dma_addr,
+ len, DMA_TO_DEVICE);
+ } else /* XDP_REDIRECT */ {
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ }
+
buf_info->dma_addr = dma_addr;
buf_info->len = len;
buf_info->page = page;
@@ -387,10 +346,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
frag = sinfo->frags;
elem = ionic_tx_sg_elems(q);
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
- dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr) {
- ionic_tx_desc_unmap_bufs(q, desc_info);
- return -EIO;
+ if (act == XDP_TX) {
+ struct page *pg = skb_frag_page(frag);
+
+ dma_addr = page_pool_get_dma_addr(pg) +
+ skb_frag_off(frag);
+ dma_sync_single_for_device(q->dev, dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ } else {
+ dma_addr = ionic_tx_map_frag(q, frag, 0,
+ skb_frag_size(frag));
+ if (dma_mapping_error(q->dev, dma_addr)) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
}
bi->dma_addr = dma_addr;
bi->len = skb_frag_size(frag);
@@ -481,15 +451,13 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
-static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
- struct ionic_buf_info *buf_info,
- int nbufs)
+static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
{
int i;
for (i = 0; i < nbufs; i++) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
buf_info++;
}
@@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
XDP_PACKET_HEADROOM, frag_len, false);
-
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, frag_len,
- DMA_FROM_DEVICE);
-
+ page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
+ buf_info->page_offset + XDP_PACKET_HEADROOM,
+ frag_len);
prefetchw(&xdp_buf.data_hard_start);
/* We limit MTU size to one buffer if !xdp_has_frags, so
@@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
do {
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
err = -ENOSPC;
- goto out_xdp_abort;
+ break;
}
frag = &sinfo->frags[sinfo->nr_frags];
sinfo->nr_frags++;
bi++;
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
- 0, frag_len, DMA_FROM_DEVICE);
+ frag_len = min_t(u16, remain_len, bi->len);
+ page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
+ buf_info->page_offset,
+ frag_len);
skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
sinfo->xdp_frags_size += frag_len;
remain_len -= frag_len;
@@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
return false; /* false = we didn't consume the packet */
case XDP_DROP:
- ionic_rx_page_free(rxq, buf_info);
+ ionic_rx_put_buf_direct(rxq, buf_info);
stats->xdp_drop++;
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp_buf);
- if (!xdpf)
- goto out_xdp_abort;
+ if (!xdpf) {
+ err = -ENOSPC;
+ break;
+ }
txq = rxq->partner;
nq = netdev_get_tx_queue(netdev, txq->index);
@@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
ionic_q_space_avail(txq),
1, 1)) {
__netif_tx_unlock(nq);
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
@@ -598,49 +568,47 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
__netif_tx_unlock(nq);
if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
-
- /* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
case XDP_ABORTED:
default:
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
- return true;
-
-out_xdp_abort:
- trace_xdp_exception(netdev, xdp_prog, xdp_action);
- ionic_rx_page_free(rxq, buf_info);
- stats->xdp_aborted++;
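+	/* common error path: recycle the buffer and count the abort */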
+ if (err) {
+ ionic_rx_put_buf_direct(rxq, buf_info);
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ stats->xdp_aborted++;
+ }
return true;
}
static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_rx_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ struct ionic_rxq_comp *comp,
+ struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct bpf_prog *xdp_prog;
- unsigned int headroom;
+ unsigned int headroom = 0;
struct sk_buff *skb;
bool synced = false;
bool use_copybreak;
@@ -648,7 +616,14 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats = q_to_rx_stats(q);
- if (comp->status) {
+ if (unlikely(comp->status)) {
+		/* Most likely status == 2: the received packet was bigger
+		 * than the available buffer, so comp->len shows the packet
+		 * size that didn't fit the advertised desc.len
+ */
+ dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
+ q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
+
stats->dropped++;
return;
}
@@ -657,18 +632,18 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->pkts++;
stats->bytes += len;
- xdp_prog = READ_ONCE(q->lif->xdp_prog);
if (xdp_prog) {
if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
return;
synced = true;
+ headroom = XDP_PACKET_HEADROOM;
}
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
use_copybreak = len <= q->lif->rx_copybreak;
if (use_copybreak)
skb = ionic_rx_copybreak(netdev, q, desc_info,
- headroom, len, synced);
+ headroom, len,
+ comp->num_sg_elems, synced);
else
skb = ionic_rx_build_skb(q, desc_info, headroom, len,
comp->num_sg_elems, synced);
@@ -744,7 +719,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq)
+static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog)
{
struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -766,11 +741,16 @@ bool ionic_rx_service(struct ionic_cq *cq)
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
- ionic_rx_clean(q, desc_info, comp);
+ ionic_rx_clean(q, desc_info, comp, xdp_prog);
return true;
}
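+/* completion service for callers that never run XDP */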
+bool ionic_rx_service(struct ionic_cq *cq)
+{
+ return __ionic_rx_service(cq, NULL);
+}
+
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void *desc)
{
@@ -781,7 +761,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q,
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
-void ionic_rx_fill(struct ionic_queue *q)
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_desc_info *desc_info;
@@ -789,6 +769,9 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
struct ionic_rxq_desc *desc;
+ unsigned int first_frag_len;
+ unsigned int first_buf_len;
+ unsigned int headroom = 0;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
@@ -806,35 +789,43 @@ void ionic_rx_fill(struct ionic_queue *q)
len = netdev->mtu + VLAN_ETH_HLEN;
- for (i = n_fill; i; i--) {
- unsigned int headroom;
- unsigned int buf_len;
+ if (xdp_prog) {
+		/* Always allocate the full-size buffer, but advertise only
+		 * the actual frag_len in the descriptor.
+		 * XDP uses space in the first buffer, so account for
+		 * headroom, tailroom, and the IP header in the first frag size.
+ */
+ headroom = XDP_PACKET_HEADROOM;
+ first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
+ first_frag_len = min_t(u16, len + headroom, first_buf_len);
+ } else {
+ /* Use MTU size if smaller than max buffer size */
+ first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
+ first_buf_len = first_frag_len;
+ }
+ for (i = n_fill; i; i--) {
+ /* fill main descriptor - buf[0] */
nfrags = 0;
remain_len = len;
desc = &q->rxq[q->head_idx];
desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
- if (!buf_info->page) { /* alloc a new buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- desc->addr = 0;
- desc->len = 0;
- return;
- }
+ buf_info->len = first_buf_len;
+ frag_len = first_frag_len - headroom;
+
+ /* get a new buffer if we can't reuse one */
+ if (!buf_info->page)
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
+ return;
}
- /* fill main descriptor - buf[0]
- * XDP uses space in the first buffer, so account for
- * head room, tail room, and ip header in the first frag size.
- */
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
- if (q->xdp_rxq_info)
- buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
- else
- buf_len = ionic_rx_buf_size(buf_info);
- frag_len = min_t(u16, len, buf_len);
-
desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
@@ -844,16 +835,26 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill sg descriptors - buf[1..n] */
sg_elem = q->rxq_sgl[q->head_idx].elems;
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
- if (!buf_info->page) { /* alloc a new sg buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- sg_elem->addr = 0;
- sg_elem->len = 0;
+ frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
+
+ /* Recycle any leftover buffers that are too small to reuse */
+ if (unlikely(buf_info->page && buf_info->len < frag_len))
+ ionic_rx_put_buf_direct(q, buf_info);
+
+ /* Get new buffer if needed */
+ if (!buf_info->page) {
+ buf_info->len = frag_len;
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
return;
}
}
sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -883,17 +884,12 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_rx_desc_info *desc_info;
- struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
desc_info = &q->rx_info[i];
- for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
- buf_info = &desc_info->bufs[j];
- if (buf_info->page)
- ionic_rx_page_free(q, buf_info);
- }
-
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
+ ionic_rx_put_buf(q, &desc_info->bufs[j]);
desc_info->nbufs = 0;
}
@@ -974,6 +970,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq)
}
}
+static unsigned int ionic_rx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do)
+{
+ struct ionic_queue *q = cq->bound_q;
+ unsigned int work_done = 0;
+ struct bpf_prog *xdp_prog;
+
+ if (work_to_do == 0)
+ return 0;
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ while (__ionic_rx_service(cq, xdp_prog)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+ ionic_rx_fill(q, xdp_prog);
+ ionic_xdp_do_flush(cq);
+
+ return work_done;
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -984,12 +1006,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- work_done = ionic_cq_service(cq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(cq->bound_q);
+ work_done = ionic_rx_cq_service(cq, budget);
- ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1030,12 +1048,8 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- rx_work_done = ionic_cq_service(rxcq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(rxcq->bound_q);
+ rx_work_done = ionic_rx_cq_service(rxcq, budget);
- ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1166,7 +1180,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
struct sk_buff *skb;
if (desc_info->xdpf) {
- ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
stats->clean++;
if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 9e73e324e7a1..b2b9a2dc9eb8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,9 +4,11 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
+struct bpf_prog;
+
void ionic_tx_flush(struct ionic_cq *cq);
-void ionic_rx_fill(struct ionic_queue *q);
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 63e3dac4d5f7..9d6399a5c780 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -326,25 +326,18 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *i
struct qede_ptp *ptp = edev->ptp;
if (!ptp) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (ptp->clock)
info->phc_index = ptp_clock_index(ptp->clock);
- else
- info->phc_index = -1;
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bcef8ab715bf..d7cdea8f604d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
- int err;
- u32 word;
struct qlcnic_cmd_args cmd;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+ u32 word;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
if (err)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3cb1c4f5c91a..45ac8befba29 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -578,7 +578,7 @@ struct rtl8169_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rtl8169_tc_offsets {
@@ -1843,7 +1843,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
+ data[12] = le16_to_cpu(counters->tx_underrun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 7882f2c0e1a4..869183e1565e 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -98,7 +98,7 @@ struct rtase_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
} __packed;
static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
@@ -1619,8 +1619,8 @@ static void rtase_dump_state(const struct net_device *dev)
le32_to_cpu(counters->rx_multicast));
netdev_err(dev, "tx_aborted %d\n",
le16_to_cpu(counters->tx_aborted));
- netdev_err(dev, "tx_underun %d\n",
- le16_to_cpu(counters->tx_underun));
+ netdev_err(dev, "tx_underrun %d\n",
+ le16_to_cpu(counters->tx_underrun));
}
static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7d69302ffa0a..de131fc5fa0b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sensor_event = efx_mcdi_sensor_event,
.rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
+
+const struct efx_nic_type efx_x4_nic_type = {
+ .is_vf = false,
+ .mem_bar = efx_ef10_pf_mem_bar,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_pf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_ef10_fini_nic,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_pf,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
+ .ev_probe = efx_mcdi_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+#ifdef CONFIG_SFC_SRIOV
+ /* currently set to the VF versions of these functions
+ * because SRIOV will be reimplemented later.
+ */
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
+
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
+ .revision = EFX_REV_X4,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .option_descriptors = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = EF10_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
+};
+
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6f1a01ded7d4..36b3b57e2055 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 848b1923133a..bb1930818beb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -230,11 +230,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_ptp_get_ts_info(efx, ts_info);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1db64fc6e909..9fa5c4c713ab 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+extern const struct efx_nic_type efx_x4_nic_type;
+
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 466df5348b29..7ec4ac7b7ff5 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -21,6 +21,7 @@ enum {
*/
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
+ EFX_REV_X4 = 6,
};
static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 88ddc226b012..c5ad84db9613 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -230,11 +230,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_siena_ptp_get_ts_info(efx, ts_info);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 31c387cc5f26..a1858f083eef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -58,10 +58,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
- /* Enable FPE interrupt */
- if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
- value |= GMAC_INT_FPE_EN;
-
writel(value, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
@@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..08add508db84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable)
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (enable) {
+ if (tx_enable) {
cfg->fpe_csr = EFPE;
value = readl(ioaddr + GMAC_RXQ_CTRL1);
value &= ~GMAC_RXQCTRL_FPRQ;
@@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+
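+	/* gate the FPE interrupt on whether the preemptible MAC is in use */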
+ value = readl(ioaddr + GMAC_INT_EN);
+
+ if (pmac_enable) {
+ if (!(value & GMAC_INT_FPE_EN)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value |= GMAC_INT_FPE_EN;
+ }
+ } else {
+ value &= ~GMAC_INT_FPE_EN;
+ }
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -605,22 +620,22 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
if (value & TRSP) {
status |= FPE_EVENT_TRSP;
- netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
}
if (value & TVER) {
status |= FPE_EVENT_TVER;
- netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
}
if (value & RRSP) {
status |= FPE_EVENT_RRSP;
- netdev_info(dev, "FPE: Respond mPacket is received\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
}
if (value & RVER) {
status |= FPE_EVENT_RVER;
- netdev_info(dev, "FPE: Verify mPacket is received\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
}
return status;
@@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
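+/* addFragSize per IEEE 802.3br: non-final fragments are at least (AFSZ + 1) * 64 bytes */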
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+ return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
+}
+
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
+{
+ u32 value;
+
+ value = readl(ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
+ ioaddr + MTL_FPE_CTRL_STS);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 num_tc = ndev->num_tc;
+
+ if (!pclass)
+ goto update_mapping;
+
+	/* DWMAC CORE4+ cannot program the TC:TXQ mapping into hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+		/* This is a 1:1 mapping; go to the next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
+ priv->ioaddr + MTL_FPE_CTRL_STS);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..6c6eb6790e83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,6 +39,12 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_FPE_CTRL_STS 0x00000c90
+/* Preemption Classification */
+#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0)
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
u32 sub_second_inc, u32 systime_flags);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index cbf2dd976ab1..f519d43738b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1504,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq,
- u32 num_rxq, bool enable)
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (!enable) {
+ if (!tx_enable) {
value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
value &= ~XGMAC_EFPE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..88cce28b2f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwmac4_setup,
@@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxlgmac2_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7e90f34b8c88..d5a9f01ecac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
+#include <net/pkt_cls.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -28,6 +29,8 @@
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
@@ -419,11 +422,16 @@ struct stmmac_ops {
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void (*fpe_send_mpacket)(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
+ void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
+ int (*fpe_map_preemption_class)(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass);
};
#define stmmac_core_init(__priv, __args...) \
@@ -528,6 +536,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
#define stmmac_fpe_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
+#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -615,6 +629,8 @@ struct stmmac_tc_ops {
struct tc_etf_qopt_offload *qopt);
int (*query_caps)(struct stmmac_priv *priv,
struct tc_query_caps_base *base);
+ int (*setup_mqprio)(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -631,6 +647,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_mqprio, __args)
struct stmmac_counters;
@@ -674,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_tc_ops dwxgmac_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b23b920eedb1..ea135203ff2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,6 +146,32 @@ struct stmmac_channel {
u32 index;
};
+/* FPE link-partner handshake mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
+
+struct stmmac_fpe_cfg {
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates.
+ */
+ spinlock_t lock;
+
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
struct stmmac_tc_entry {
bool in_use;
bool in_hw;
@@ -339,11 +365,8 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
- /* Workqueue for handling FPE hand-shaking */
- unsigned long fpe_task_state;
- struct workqueue_struct *fpe_wq;
- struct work_struct fpe_task;
- char wq_name[IFNAMSIZ + 4];
+ /* Frame Preemption feature (FPE) */
+ struct stmmac_fpe_cfg fpe_cfg;
/* TC Handling */
unsigned int tc_entries_max;
@@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 220c582904f4..2a37592a6281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -19,6 +19,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
+#include "dwmac5.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
@@ -1200,13 +1201,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
info->phc_index = ptp_clock_index(priv->ptp_clock);
+ else
+ info->phc_index = 0;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -1263,6 +1264,98 @@ static int stmmac_set_tunable(struct net_device *dev,
return ret;
}
+static int stmmac_get_mm(struct net_device *ndev,
+ struct ethtool_mm_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 frag_size;
+
+ if (!priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
+
+ state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ state->verify_enabled = priv->fpe_cfg.verify_enabled;
+ state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
+ state->verify_time = priv->fpe_cfg.verify_time;
+ state->tx_enabled = priv->fpe_cfg.tx_enabled;
+ state->verify_status = priv->fpe_cfg.status;
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ /* FPE is active only when TX is enabled and verification either
+ * succeeded or is disabled (i.e. forced on)
+ */
+ if (state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
+ state->tx_active = true;
+ else
+ state->tx_active = false;
+
+ frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
+
+ spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+
+ return 0;
+}
+
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
+ u32 frag_size;
+ int err;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &frag_size, extack);
+ if (err)
+ return err;
+
+ /* Wait for the verification that's currently in progress to finish */
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ fpe_cfg->verify_enabled = cfg->verify_enabled;
+ fpe_cfg->pmac_enabled = cfg->pmac_enabled;
+ fpe_cfg->verify_time = cfg->verify_time;
+ fpe_cfg->tx_enabled = cfg->tx_enabled;
+
+ if (!cfg->verify_enabled)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+
+ stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+ stmmac_fpe_apply(priv);
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+
+ return 0;
+}
+
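To see the shape of the input, here is a hypothetical ethtool_mm_cfg as
stmmac_set_mm() above might receive it when userspace enables preemption with
verification. The values are examples only, not defaults mandated by this
patch:

	struct ethtool_mm_cfg cfg = {
		.verify_enabled   = true,
		.verify_time      = 128,	/* ms; capped by max_verify_time */
		.tx_enabled       = true,
		.pmac_enabled     = true,
		.tx_min_frag_size = 60,		/* ETH_ZLEN, i.e. addFragSize 0 */
	};

With a recent ethtool binary, a command along the lines of
"ethtool --set-mm eth0 pmac-enabled on tx-enabled on verify-enabled on" should
reach this handler; the exact flag spelling belongs to the ethtool tool, not
to this patch.
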
+static void stmmac_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_counters *mmc = &priv->mmc;
+
+ if (!priv->dma_cap.rmon)
+ return;
+
+ stmmac_mmc_read(priv, priv->mmcaddr, mmc);
+
+ s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+ s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+ s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+ s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+ s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+ s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1301,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ .get_mm = stmmac_get_mm,
+ .set_mm = stmmac_set_mm,
+ .get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9fca8d1227c..d3895d7eecfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_VERIFY);
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ if (is_up && fpe_cfg->pmac_enabled) {
+ /* The VERIFY process requires the pMAC to be enabled when the NIC comes up */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, true);
+
+ /* New link => maybe new partner => new verification process */
+ stmmac_fpe_apply(priv);
} else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
+ /* No link => turn off EFPE */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, false);
}
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -3358,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -3533,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_set_hw_vlan_mode(priv, priv->hw);
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
-
return 0;
}
@@ -4036,18 +4021,6 @@ static int stmmac_open(struct net_device *dev)
return ret;
}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- if (priv->fpe_wq) {
- destroy_workqueue(priv->fpe_wq);
- priv->fpe_wq = NULL;
- }
-
- netdev_info(priv->dev, "FPE workqueue stop");
-}
-
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
@@ -4095,10 +4068,10 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
-
if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+
+ pm_runtime_put(priv->device);
return 0;
}
@@ -5982,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev,
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
-
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
+ /* This is interrupt context, just spin_lock() */
+ spin_lock(&fpe_cfg->lock);
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_RESPONSE);
- }
+ if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+ goto unlock_out;
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
+ /* LP has sent verify mPacket */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_RESPONSE);
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
+ /* Local has sent verify mPacket */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
+ /* LP has sent response mPacket */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+ fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
+unlock_out:
+ spin_unlock(&fpe_cfg->lock);
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -6257,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7376,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction by
+ * transmitting Verify mPackets up to 3 times. Wait until the link
+ * partner responds with a Response mPacket, otherwise fail.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
-
- netdev_info(priv->dev, "configured FPE\n");
+ struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+ struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+ fpe_cfg);
+ unsigned long flags;
+ bool rearm = false;
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
+ switch (fpe_cfg->status) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ if (fpe_cfg->verify_retries != 0) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_VERIFY);
+ fpe_cfg, MPACKET_VERIFY);
+ rearm = true;
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
}
- /* Sleep then retry */
- msleep(500);
+
+ fpe_cfg->verify_retries--;
+ break;
+
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ true, true);
+ break;
+
+ default:
+ break;
}
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ if (rearm) {
+ mod_timer(&fpe_cfg->verify_timer,
+ jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
+ }
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
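
A back-of-the-envelope timing sketch, not part of the patch: the timer fires
immediately when armed, so with the defaults above the verification worst case
works out as follows.

	/* Up to 3 Verify mPackets are sent, spaced verify_time ms apart,
	 * and FAILED is declared on the expiry after the last retry:
	 *
	 *   worst case ~= STMMAC_FPE_MM_MAX_VERIFY_RETRIES *
	 *                 STMMAC_FPE_MM_MAX_VERIFY_TIME_MS = 3 * 128 = 384 ms
	 */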
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
+ if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+ fpe_cfg->verify_enabled &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+ timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+ mod_timer(&fpe_cfg->verify_timer, jiffies);
+ }
+}
+
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* If verification is disabled, configure FPE right away.
+ * Otherwise let the timer code do it.
+ */
+ if (!fpe_cfg->verify_enabled) {
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ fpe_cfg->tx_enabled,
+ fpe_cfg->pmac_enabled);
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->plat->fpe_cfg->hs_enable = enable;
+ if (netif_running(priv->dev))
+ stmmac_fpe_verify_timer_arm(fpe_cfg);
}
}
@@ -7555,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
-
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7722,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
+ priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+ priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+ spin_lock_init(&priv->fpe_cfg.lock);
+
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Otherwise the driver will try to
@@ -7895,16 +7878,8 @@ int stmmac_suspend(struct device *dev)
}
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
-
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->dma_cap.fpesel)
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
priv->speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 996f2bcd07a2..832998bc020b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
if (ret)
return -ENOMEM;
- if (!priv->plat->fpe_cfg) {
- priv->plat->fpe_cfg = devm_kzalloc(priv->device,
- sizeof(*priv->plat->fpe_cfg),
- GFP_KERNEL);
- if (!priv->plat->fpe_cfg)
- return -ENOMEM;
- } else {
- memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
- }
-
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -941,9 +931,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct netlink_ext_ack *extack = qopt->mqprio.extack;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
- bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -1028,16 +1018,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
switch (qopt->entries[i].command) {
case TC_TAPRIO_CMD_SET_GATES:
- if (fpe)
- return -EINVAL;
break;
case TC_TAPRIO_CMD_SET_AND_HOLD:
gates |= BIT(0);
- fpe = true;
break;
case TC_TAPRIO_CMD_SET_AND_RELEASE:
gates &= ~BIT(0);
- fpe = true;
break;
default:
return -EOPNOTSUPP;
@@ -1068,16 +1054,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
tc_taprio_map_maxsdu_txq(priv, qopt);
- if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->est_lock);
- return -EOPNOTSUPP;
- }
-
- /* Actual FPE register configuration will be done after FPE handshake
- * is success.
- */
- priv->plat->fpe_cfg->enable = fpe;
-
ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->est_lock);
@@ -1086,12 +1062,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
goto disable;
}
- netdev_info(priv->dev, "configured EST\n");
-
- if (fpe) {
- stmmac_fpe_handshake(priv, true);
- netdev_info(priv->dev, "start FPE handshake\n");
- }
+ ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+ qopt->mqprio.preemptible_tcs);
+ if (ret)
+ goto disable;
return 0;
@@ -1109,16 +1083,7 @@ disable:
mutex_unlock(&priv->est_lock);
}
- priv->plat->fpe_cfg->enable = false;
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false);
- netdev_info(priv->dev, "disabled FPE\n");
-
- stmmac_fpe_handshake(priv, false);
- netdev_info(priv->dev, "stop FPE handshake\n");
+ stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
return ret;
}
@@ -1174,6 +1139,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return err;
}
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ if (!qopt->mqprio.preemptible_tcs)
+ return tc_setup_taprio(priv, qopt);
+
+ NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+ "taprio with FPE is not implemented for this MAC");
+
+ return -EOPNOTSUPP;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1198,6 +1175,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1214,6 +1198,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
}
}
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+ stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ u32 offset, count, num_stack_tx_queues = 0;
+ struct net_device *ndev = priv->dev;
+ u32 num_tc = qopt->num_tc;
+ int err;
+
+ if (!num_tc) {
+ stmmac_reset_tc_mqprio(ndev, extack);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
+
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+ mqprio->preemptible_tcs);
+ if (err)
+ goto err_reset_tc;
+
+ return 0;
+
+err_reset_tc:
+ stmmac_reset_tc_mqprio(ndev, extack);
+
+ return err;
+}
+
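As an illustration, a hypothetical offload payload as tc_setup_dwmac510_mqprio()
above might receive it for a two-TC split with TC1 preemptible; this is roughly
what "tc qdisc replace dev eth0 root mqprio num_tc 2 ... queues 2@0 2@2
fp E P hw 1" would produce with a recent iproute2, and all values are examples:

	struct tc_mqprio_qopt_offload mqprio = {
		.qopt = {
			.num_tc = 2,
			.count  = { 2, 2 },	/* TXQs per TC */
			.offset = { 0, 2 },	/* first TXQ of each TC */
		},
		.preemptible_tcs = BIT(1),	/* TC1 carries preemptible traffic */
	};
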
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "mqprio HW offload is not implemented for this MAC");
+ return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1222,4 +1281,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
+};
+
+const struct stmmac_tc_ops dwxgmac_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 73b6cef10401..b715af21d23a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev)
static int emac_get_sset_count(struct net_device *ndev, int stringset)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
switch (stringset) {
case ETH_SS_STATS:
- return ICSSG_NUM_ETHTOOL_STATS;
+ if (emac->prueth->pa_stats)
+ return ICSSG_NUM_ETHTOOL_STATS;
+ else
+ return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS;
default:
return -EOPNOTSUPP;
}
@@ -78,6 +82,7 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset)
static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
u8 *p = data;
int i;
@@ -86,8 +91,9 @@ static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
if (!icssg_all_miig_stats[i].standard_stats)
ethtool_puts(&p, icssg_all_miig_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
- ethtool_puts(&p, icssg_all_pa_stats[i].name);
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ ethtool_puts(&p, icssg_all_pa_stats[i].name);
break;
default:
break;
@@ -106,8 +112,9 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
if (!icssg_all_miig_stats[i].standard_stats)
*(data++) = emac->stats[i];
- for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
- *(data++) = emac->pa_stats[i];
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ *(data++) = emac->pa_stats[i];
}
static int emac_get_ts_info(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index becdda143c19..6644203d6bb7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1185,7 +1185,7 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
if (IS_ERR(prueth->pa_stats)) {
dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
- return -ENODEV;
+ prueth->pa_stats = NULL;
}
if (eth0_node) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 06a15c0b2acc..8800bd3a8d07 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -42,11 +42,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->stats[i] -= tx_pkt_cnt * 8;
}
- for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- reg = ICSSG_FW_STATS_BASE + icssg_all_pa_stats[i].offset *
- PRUETH_NUM_MACS + slice * sizeof(u32);
- regmap_read(prueth->pa_stats, reg, &val);
- emac->pa_stats[i] += val;
+ if (prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ reg = ICSSG_FW_STATS_BASE +
+ icssg_all_pa_stats[i].offset *
+ PRUETH_NUM_MACS + slice * sizeof(u32);
+ regmap_read(prueth->pa_stats, reg, &val);
+ emac->pa_stats[i] += val;
+ }
}
}
@@ -70,9 +73,11 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)];
}
- for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
- return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ if (emac->prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
+ return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ }
}
netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 43d81b40f761..d64b8abcf018 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -529,8 +529,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
@@ -609,9 +607,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- int csum_offload_on_tx_path;
- int csum_offload_on_rx_path;
-
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 374dff70ef0d..ea7d7c03f48e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1188,9 +1188,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -2639,38 +2637,28 @@ static int axienet_probe(struct platform_device *pdev)
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
- /* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ /* Can checksum any contiguous range */
+ ndev->features |= NETIF_F_HW_CSUM;
break;
case 2:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
break;
- default:
- lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_PARTIAL_RX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
case 2:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_FULL_RX_CSUM;
lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
- default:
- lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 56df37f8d50a..aef316278eb4 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1026,9 +1026,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
if (info->phc_index < 0) {
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping =
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index fd02f5cbc853..b156493d7084 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dev_printk.h>
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
@@ -104,7 +105,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
+ dev_dbg(&mdio->dev, "registered phy fwnode %pfw at address %i\n",
child, addr);
return 0;
}
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 5732ad65e7f9..a5ef8fe50704 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -175,6 +175,9 @@
#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0
/* MX chip top registers */
+#define LAN887X_CHIP_HARD_RST 0xf03e
+#define LAN887X_CHIP_HARD_RST_RESET BIT(0)
+
#define LAN887X_CHIP_SOFT_RST 0xf03f
#define LAN887X_CHIP_SOFT_RST_RESET BIT(0)
@@ -188,9 +191,60 @@
#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9)
#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0)
+#define LAN887X_CALIB_CONFIG_100 0x437
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL BIT(5)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE BIT(4)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE BIT(3)
+#define LAN887X_CALIB_CONFIG_100_VAL \
+ (LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL)
+
+#define LAN887X_MAX_PGA_GAIN_100 0x44f
+#define LAN887X_MIN_PGA_GAIN_100 0x450
+#define LAN887X_START_CBL_DIAG_100 0x45a
+#define LAN887X_CBL_DIAG_DONE BIT(1)
+#define LAN887X_CBL_DIAG_START BIT(0)
+#define LAN887X_CBL_DIAG_STOP 0x0
+
+#define LAN887X_CBL_DIAG_TDR_THRESH_100 0x45b
+#define LAN887X_CBL_DIAG_AGC_THRESH_100 0x45c
+#define LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100 0x45d
+#define LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100 0x45e
+#define LAN887X_CBL_DIAG_CYC_CONFIG_100 0x45f
+#define LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100 0x460
+#define LAN887X_CBL_DIAG_MIN_PGA_GAIN_100 0x462
+#define LAN887X_CBL_DIAG_AGC_GAIN_100 0x497
+#define LAN887X_CBL_DIAG_POS_PEAK_VALUE_100 0x499
+#define LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100 0x49a
+#define LAN887X_CBL_DIAG_POS_PEAK_TIME_100 0x49c
+#define LAN887X_CBL_DIAG_NEG_PEAK_TIME_100 0x49d
+
+#define MICROCHIP_CABLE_NOISE_MARGIN 20
+#define MICROCHIP_CABLE_TIME_MARGIN 89
+#define MICROCHIP_CABLE_MIN_TIME_DIFF 96
+#define MICROCHIP_CABLE_MAX_TIME_DIFF \
+ (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver"
+/* TEST_MODE_NORMAL: Non-hybrid results used to calculate cable status (open/short/ok)
+ * TEST_MODE_HYBRID: Hybrid results to calculate distance to fault
+ */
+enum cable_diag_mode {
+ TEST_MODE_NORMAL,
+ TEST_MODE_HYBRID
+};
+
+/* CD_TEST_INIT: Cable test is initiated
+ * CD_TEST_DONE: Cable test is done
+ */
+enum cable_diag_state {
+ CD_TEST_INIT,
+ CD_TEST_DONE
+};
+
struct access_ereg_val {
u8 mode;
u8 bank;
@@ -1420,6 +1474,362 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
ethtool_puts(&data, lan887x_hw_stats[i].string);
}
+static int lan887x_cd_reset(struct phy_device *phydev,
+ enum cable_diag_state cd_done)
+{
+ u16 val;
+ int rc;
+
+ /* Chip hard-reset */
+ rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_HARD_RST,
+ LAN887X_CHIP_HARD_RST_RESET);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for reset to complete */
+ rc = phy_read_poll_timeout(phydev, MII_PHYSID2, val,
+ ((val & GENMASK(15, 4)) ==
+ (PHY_ID_LAN887X & GENMASK(15, 4))),
+ 5000, 50000, true);
+ if (rc < 0)
+ return rc;
+
+ if (cd_done == CD_TEST_DONE) {
+ /* Cable diagnostics complete. Restore PHY. */
+ rc = lan887x_phy_setup(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_init(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_reconfig(phydev);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_prep(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ static const struct lan887x_regwr_map values[] = {
+ {MDIO_MMD_VEND1, LAN887X_MAX_PGA_GAIN_100, 0x1f},
+ {MDIO_MMD_VEND1, LAN887X_MIN_PGA_GAIN_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TDR_THRESH_100, 0x1},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_AGC_THRESH_100, 0x3c},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100, 0x46},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_CYC_CONFIG_100, 0x5a},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100, 0x44d5},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_PGA_GAIN_100, 0x0},
+
+ };
+ int rc;
+
+ rc = lan887x_cd_reset(phydev, CD_TEST_INIT);
+ if (rc < 0)
+ return rc;
+
+ /* Force the DUT into master mode; the master/slave role does not
+ * matter during diagnostics
+ */
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x80b0, 0x0038);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CALIB_CONFIG_100, 0,
+ LAN887X_CALIB_CONFIG_100_VAL);
+ if (rc < 0)
+ return rc;
+
+ for (int i = 0; i < ARRAY_SIZE(values); i++) {
+ rc = phy_write_mmd(phydev, values[i].mmd, values[i].reg,
+ values[i].val);
+ if (rc < 0)
+ return rc;
+
+ if (mode &&
+ values[i].reg == LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100) {
+ rc = phy_write_mmd(phydev, values[i].mmd,
+ values[i].reg, 0xa);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (mode == TEST_MODE_HYBRID) {
+ rc = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN887X_AFE_PORT_TESTBUS_CTRL4,
+ BIT(0), BIT(0));
+ if (rc < 0)
+ return rc;
+ }
+
+ /* HW_INIT 100T1: get the DUT running in 100T1 mode */
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Cable diag requires a hard reset and is sensitive to delays.
+ * A hard reset is expected on entry to and exit from cable diag.
+ * Wait for 50ms.
+ */
+ msleep(50);
+
+ /* Start cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_START);
+}
+
+static int lan887x_cable_test_chk(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ int val;
+ int rc;
+
+ if (mode == TEST_MODE_HYBRID) {
+ /* Cable diag requires a hard reset and is sensitive to delays.
+ * A hard reset is expected on entry to and exit from cable diag.
+ * Wait for cable diag to complete; the poll waits at least 50ms
+ * between reads while the done condition has not matched.
+ */
+ rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100, val,
+ ((val & LAN887X_CBL_DIAG_DONE) ==
+ LAN887X_CBL_DIAG_DONE),
+ 50000, 500000, false);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & LAN887X_CBL_DIAG_DONE) != LAN887X_CBL_DIAG_DONE)
+ return -EAGAIN;
+ }
+
+ /* Stop cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+}
+
+static int lan887x_cable_test_start(struct phy_device *phydev)
+{
+ int rc, ret;
+
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_report(struct phy_device *phydev)
+{
+ int pos_peak_cycle, pos_peak_cycle_hybrid, pos_peak_in_phases;
+ int pos_peak_time, pos_peak_time_hybrid, neg_peak_time;
+ int neg_peak_cycle, neg_peak_in_phases;
+ int pos_peak_in_phases_hybrid;
+ int gain_idx, gain_idx_hybrid;
+ int pos_peak_phase_hybrid;
+ int pos_peak, neg_peak;
+ int distance;
+ int detect;
+ int length;
+ int ret;
+ int rc;
+
+ /* Read non-hybrid results */
+ gain_idx = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx < 0) {
+ rc = gain_idx;
+ goto error;
+ }
+
+ pos_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_VALUE_100);
+ if (pos_peak < 0) {
+ rc = pos_peak;
+ goto error;
+ }
+
+ neg_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100);
+ if (neg_peak < 0) {
+ rc = neg_peak;
+ goto error;
+ }
+
+ pos_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time < 0) {
+ rc = pos_peak_time;
+ goto error;
+ }
+
+ neg_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_TIME_100);
+ if (neg_peak_time < 0) {
+ rc = neg_peak_time;
+ goto error;
+ }
+
+ /* Calculate non-hybrid values */
+ pos_peak_cycle = (pos_peak_time >> 7) & 0x7f;
+ pos_peak_in_phases = (pos_peak_cycle * 96) + (pos_peak_time & 0x7f);
+ neg_peak_cycle = (neg_peak_time >> 7) & 0x7f;
+ neg_peak_in_phases = (neg_peak_cycle * 96) + (neg_peak_time & 0x7f);
+
+ /* Derive the status of the cable */
+ if (pos_peak > MICROCHIP_CABLE_NOISE_MARGIN &&
+ neg_peak > MICROCHIP_CABLE_NOISE_MARGIN && gain_idx >= 0) {
+ if (pos_peak_in_phases > neg_peak_in_phases &&
+ ((pos_peak_in_phases - neg_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((pos_peak_in_phases - neg_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ pos_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_SAME_SHORT;
+ } else if (neg_peak_in_phases > pos_peak_in_phases &&
+ ((neg_peak_in_phases - pos_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((neg_peak_in_phases - pos_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ neg_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_OPEN;
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+
+ if (detect == LAN87XX_CABLE_TEST_OK) {
+ distance = 0;
+ goto get_len;
+ }
+
+ /* Re-initialize PHY and start cable diag test */
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Wait for cable diag test completion */
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Read hybrid results */
+ gain_idx_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx_hybrid < 0) {
+ rc = gain_idx_hybrid;
+ goto error;
+ }
+
+ pos_peak_time_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time_hybrid < 0) {
+ rc = pos_peak_time_hybrid;
+ goto error;
+ }
+
+ /* Calculate hybrid values to derive cable length to fault */
+ pos_peak_cycle_hybrid = (pos_peak_time_hybrid >> 7) & 0x7f;
+ pos_peak_phase_hybrid = pos_peak_time_hybrid & 0x7f;
+ pos_peak_in_phases_hybrid = pos_peak_cycle_hybrid * 96 +
+ pos_peak_phase_hybrid;
+
+ /* Distance to fault calculation.
+ * distance = (peak_in_phases - peak_in_phases_hybrid) *
+ *            propagation_constant,
+ * where the propagation constant converts a number of phases to meters:
+ * propagation_constant = 0.015953
+ *		(0.6811 * 2.9979 * 156.2499 * 0.0001 * 0.5)
+ * The constant is applied as 15953/10000 (1.5953) because ethtool
+ * further divides the fault length by 100 to convert to meters.
+ */
+ if (detect == LAN87XX_CABLE_TEST_OPEN) {
+ distance = (((pos_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else if (detect == LAN87XX_CABLE_TEST_SAME_SHORT) {
+ distance = (((neg_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else {
+ distance = 0;
+ }
+
+get_len:
+ rc = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (rc < 0)
+ return rc;
+
+ length = ((u32)distance & GENMASK(15, 0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, length);
+
+ return 0;
+
+cd_stop:
+ /* Stop cable diag */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+ if (ret < 0)
+ return ret;
+
+error:
+ /* Cable diag test failed */
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ /* Return error in failure case */
+ return rc;
+}
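
A worked instance of the distance formula above, using made-up peak readings:

	/* Hypothetical open-pair readings:
	 *   pos_peak_in_phases        = 500
	 *   pos_peak_in_phases_hybrid = 200
	 *   distance = (500 - 200) * 15953 / 10000 = 478
	 * ethtool divides the reported fault length by 100, so this
	 * shows up as roughly 4.78 m to the fault.
	 */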
+
+static int lan887x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int rc;
+
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+ /* Let the PHY state machine poll again */
+ if (rc == -EAGAIN)
+ return 0;
+ return rc;
+ }
+
+ /* Cable diag test complete */
+ *finished = true;
+
+ /* Retrieve test status and cable length to fault */
+ return lan887x_cable_test_report(phydev);
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -1458,6 +1868,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN887X),
.name = "Microchip LAN887x T1 PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = lan887x_probe,
.get_features = lan887x_get_features,
.config_init = lan887x_phy_init,
@@ -1468,6 +1879,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_status = genphy_c45_read_status,
+ .cable_test_start = lan887x_cable_test_start,
+ .cable_test_get_status = lan887x_cable_test_get_status,
}
};
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ab4e9fc03017..4309317de3d1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1636,6 +1636,48 @@ static int phylink_register_sfp(struct phylink *pl,
}
/**
+ * phylink_set_fixed_link() - set the fixed link
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @state: a pointer to a &struct phylink_link_state describing the fixed link
+ *
+ * This function is used when the link parameters are known and do not change,
+ * e.g. a direct connection to a switch or to another MAC, making it suitable
+ * for fixed-link setups.
+ *
+ * Returns: zero on success or negative error code.
+ */
+int phylink_set_fixed_link(struct phylink *pl,
+ const struct phylink_link_state *state)
+{
+ const struct phy_setting *s;
+ unsigned long *adv;
+
+ if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
+ return -EINVAL;
+
+ s = phy_lookup_setting(state->speed, state->duplex,
+ pl->supported, true);
+ if (!s)
+ return -EINVAL;
+
+ adv = pl->link_config.advertising;
+ linkmode_zero(adv);
+ linkmode_set_bit(s->bit, adv);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
+
+ pl->link_config.speed = state->speed;
+ pl->link_config.duplex = state->duplex;
+ pl->link_config.link = 1;
+ pl->link_config.an_complete = 1;
+
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_set_fixed_link);
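
A minimal, hypothetical caller (the foo_* names are made up): note from the
checks above that the call only succeeds while phylink is stopped and the
instance was created in MLO_AN_PHY mode.

	/* foo_priv is an imaginary driver private struct holding the
	 * struct phylink pointer returned by phylink_create()
	 */
	static int foo_use_fixed_link(struct foo_priv *priv)
	{
		struct phylink_link_state state = {
			.speed  = SPEED_1000,
			.duplex = DUPLEX_FULL,
		};

		/* phylink must still be stopped at this point */
		return phylink_set_fixed_link(priv->phylink, &state);
	}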
+
+/**
* phylink_create() - create a phylink instance
* @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index b93a64bf8190..35bfe7232e95 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1774,7 +1774,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
if (!arvif->is_started)
return -EINVAL;
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a5da32e87106..646e1737d4c4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1437,7 +1437,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
* by indicating that radar was detected.
*/
ath10k_warn(ar, "failed to start CAC: %d\n", ret);
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index fe2344598364..4861179b2217 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3990,7 +3990,7 @@ static void ath10k_radar_detected(struct ath10k *ar)
if (ar->dfs_block_radar_events)
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
static void ath10k_radar_confirmation_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 76faa55fd0f3..09c37e19a168 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -407,11 +407,17 @@ struct ath11k_vif {
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
- struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
struct ath11k_reg_tpc_power_info reg_tpc_info;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 2f6dd69d3be2..65d2bc0687c8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1305,18 +1305,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- u32 tx_success_bytes;
- u32 tx_retry_bytes;
- u32 tx_failed_bytes;
- u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- u16 tx_success_msdus;
- u16 tx_retry_msdus;
- u16 tx_failed_msdus;
- u16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1364,17 +1352,6 @@ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
u32 success_bytes;
} __packed;
-struct htt_ppdu_stats_usr_cmn_array {
- struct htt_tlv tlv_hdr;
- u32 num_ppdu_stats;
- /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
- * elements.
- * tx_ppdu_stats_info is variable length, with length =
- * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
- */
- struct htt_tx_ppdu_stats_info tx_ppdu_info[];
-} __packed;
-
struct htt_ppdu_user_stats {
u16 peer_id;
u32 tlv_flags;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2662092ee00a..87abfa547529 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -8358,7 +8358,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index cdfd43a7321a..7f2e9a9b4097 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -287,7 +287,6 @@ struct ath12k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u32 key_cipher;
u8 tx_encap_type;
u8 vdev_stats_id;
@@ -295,6 +294,13 @@ struct ath12k_vif {
bool ps;
struct ath12k_vif_cache *cache;
struct ath12k_rekey_data rekey_data;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath12k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index b77497c14ac4..2fb18b83b3ee 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1495,18 +1495,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- __le32 tx_success_bytes;
- __le32 tx_retry_bytes;
- __le32 tx_failed_bytes;
- __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- __le16 tx_success_msdus;
- __le16 tx_retry_msdus;
- __le16 tx_failed_msdus;
- __le16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index a3248d977532..137394c36460 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -3663,7 +3663,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct ath12k_wmi_scan_req_arg arg = {};
+ struct ath12k_wmi_scan_req_arg *arg = NULL;
int ret;
int i;
bool create = true;
@@ -3745,42 +3745,47 @@ scan:
if (ret)
goto exit;
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++)
- arg.ssid[i] = req->ssids[i];
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++)
+ arg->ssid[i] = req->ssids[i];
} else {
- arg.scan_f_passive = 1;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
-
- if (!arg.chan_list) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
- ret = ath12k_start_scan(ar, &arg);
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3790,14 +3795,15 @@ scan:
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
+ msecs_to_jiffies(arg->max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
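The rewrite above is the usual move-a-large-struct-off-the-stack refactor: allocate the argument block with kzalloc(), funnel every failure through one exit label, and rely on kfree(NULL) being a no-op so the cleanup stays unconditional. A freestanding sketch of the shape, with calloc()/free() standing in for kzalloc()/kfree() and hypothetical demo_* names:

	#include <stdlib.h>

	struct demo_scan_arg {
		unsigned int *chan_list;
		void *extraie;
	};

	static int demo_scan(unsigned int n_chan)
	{
		struct demo_scan_arg *arg;
		int ret = 0;

		arg = calloc(1, sizeof(*arg));
		if (!arg)
			return -1;		/* -ENOMEM in the driver */

		arg->chan_list = calloc(n_chan, sizeof(*arg->chan_list));
		if (!arg->chan_list) {
			ret = -1;
			goto exit;
		}

		/* ... program and launch the scan from *arg ... */

	exit:
		/* like kfree(), free(NULL) does nothing, so no NULL checks */
		free(arg->chan_list);
		free(arg->extraie);
		free(arg);
		return ret;
	}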
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index a76413320dbf..2cd3ff9b0164 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -6789,7 +6789,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar));
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 11349218bc21..3689e12db9f7 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -280,7 +280,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
if (!pd->add_pulse(pd, pe, NULL))
return;
DFS_STAT_INC(sc, radar_detected);
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 8e18e9b4ef48..426caa057396 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -116,7 +116,7 @@ static ssize_t write_file_simulate_radar(struct file *file,
{
struct ath_softc *sc = file->private_data;
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 0c7841f95228..a3733c9b484e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
}
resubmit:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ESHUTDOWN:
goto free_skb;
default:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
goto resubmit;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 815f6b3c79fc..349aa3439502 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1135,7 +1135,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
- length += sizeof(ssid_le) * n_ssids,
+ length += sizeof(ssid_le) * n_ssids;
ptr = (char *)params_le + offset;
for (i = 0; i < n_ssids; i++) {
memset(&ssid_le, 0, sizeof(ssid_le));
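The one-character fix above replaces a comma operator with the intended semicolon. The comma quietly fuses two statements into one expression; here both sides still executed, so the bug was latent rather than live, but the same slip inside a braceless conditional changes behaviour. A tiny illustration:

	int demo(int n)
	{
		int length = 0, offset = 0;

		length += 4 * n,	/* comma operator: still one statement... */
		offset = length;	/* ...so this always runs, as intended here */

		if (n)
			length += 4 * n,	/* but now BOTH lines are inside */
			offset = length;	/* the if - easy to misread */

		return length + offset;
	}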
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 8873904f51ec..59751f123571 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -13,7 +13,7 @@ iwlmvm-y += ptp.o
iwlmvm-y += time-sync.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-$(CONFIG_PM) += d3.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6f5b16eb4902..a327893c6dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1232,7 +1232,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* fast_resume will be cleared by iwl_mvm_fast_resume */
fast_resume = mvm->fast_resume;
@@ -1254,7 +1254,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c9b69c3a61fc..ef07cff203b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,7 +512,7 @@ struct iwl_mvm_vif {
bool bf_enabled;
bool ba_enabled;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* WoWLAN GTK rekey data */
struct {
u8 kck[NL80211_KCK_EXT_LEN];
@@ -1178,7 +1178,7 @@ struct iwl_mvm {
struct ieee80211_vif *p2p_device_vif;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
@@ -2306,7 +2306,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 2cd89ccaef9a..4dd4a9d5c71f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -2155,6 +2155,7 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+#ifdef CONFIG_PM_SLEEP
static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -2163,11 +2164,13 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_stop_device(mvm);
-#ifdef CONFIG_PM
mvm->fast_resume = false;
-#endif
mutex_unlock(&mvm->mutex);
}
+#else
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
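The ops.c hunk above also shows the standard compile-out idiom replacing the in-body #ifdef: one real definition under CONFIG_PM_SLEEP and an empty stub otherwise, so the ops table can reference the symbol unconditionally. Schematic only, not the iwlwifi code:

	struct demo_op_mode;	/* opaque, for illustration */

	#ifdef CONFIG_PM_SLEEP
	static void demo_device_powered_off(struct demo_op_mode *op_mode)
	{
		/* real suspend/resume bookkeeping lives here */
	}
	#else
	static void demo_device_powered_off(struct demo_op_mode *op_mode)
	{
	}
	#endif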
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 3c193074662b..d7be232f5739 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -116,11 +116,6 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
int8_t p2, int usesnr);
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
- uint16_t cmd_action);
-
int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 631b5da09f86..a5d4c09fb918 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -484,12 +484,9 @@ void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
void lbtf_cmd_response_rx(struct lbtf_private *priv);
/* main.c */
-struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
- int *cfp_no);
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
const struct lbtf_ops *ops);
int lbtf_remove_card(struct lbtf_private *priv);
-int lbtf_start_card(struct lbtf_private *priv);
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
void lbtf_bcn_sent(struct lbtf_private *priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index b90f922f1cdc..032b93a41d99 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -117,12 +117,12 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
dfs_cac_work);
chandef = priv->dfs_chandef;
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, MSG,
"CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
}
@@ -174,7 +174,7 @@ int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
*/
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
mwifiex_dbg(priv->adapter, ERROR,
"failed to stop CAC in FW\n");
@@ -182,7 +182,8 @@ void mwifiex_abort_cac(struct mwifiex_private *priv)
"Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL,
+ 0);
}
}
@@ -221,7 +222,7 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
cfg80211_cac_event(priv->netdev,
&priv->dfs_chandef,
NL80211_RADAR_DETECTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
break;
default:
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 5209e1c7d2e7..fca3eea7ee84 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1906,7 +1906,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- if (!priv->bss_started && priv->wdev.cac_started) {
+ if (!priv->bss_started && priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
mwifiex_abort_cac(priv);
}
@@ -4038,7 +4038,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return -EBUSY;
}
- if (priv->wdev.cac_started)
+ if (priv->wdev.links[0].cac_started)
return -EBUSY;
if (cfg80211_chandef_identical(&params->chandef,
@@ -4205,7 +4205,7 @@ static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_radar_params radar_params;
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index e91def0afa14..d03129d5d24e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1627,7 +1627,7 @@ struct host_cmd_ds_802_11_scan_rsp {
struct host_cmd_ds_802_11_scan_ext {
u32 reserved;
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
struct mwifiex_ie_types_bss_mode {
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 010e6ff91457..cab889af4c4a 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2534,8 +2534,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
ext_scan_resp = &resp->params.ext_scan;
tlv = (void *)ext_scan_resp->tlv_buffer;
- buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
- - 1);
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
type = le16_to_cpu(tlv->type);
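The fw.h and scan.c hunks above belong together: with the old one-element array, sizeof(*ext_scan_resp) already counted one byte of TLV data, hence the "- 1" correction in the length math; a flexible array contributes nothing to sizeof, so the correction is deleted along with the fake element. Sketch with packed stand-in structs:

	#include <stdio.h>

	struct old_rsp { unsigned int reserved; unsigned char tlv[1]; } __attribute__((packed));
	struct new_rsp { unsigned int reserved; unsigned char tlv[];  } __attribute__((packed));

	int main(void)
	{
		/* prints "old=5 new=4": the fake element inflated the header */
		printf("old=%zu new=%zu\n",
		       sizeof(struct old_rsp), sizeof(struct new_rsp));
		return 0;
	}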
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index bb291fe314fb..9d5561f44134 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -929,14 +929,19 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_phy *phy)
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
- struct ieee80211_hw *hw = phy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
+ int ret;
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ set_bit(MT76_RESET, &phy->state);
+
+ mt76_worker_disable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
@@ -946,14 +951,34 @@ void mt76_set_channel(struct mt76_phy *phy)
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
+ phy->offchannel = offchannel;
if (!offchannel)
phy->main_chan = chandef->chan;
if (chandef->chan != phy->main_chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+ mt76_worker_enable(&dev->tx_worker);
+
+ ret = dev->drv->set_channel(phy);
+
+ clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_schedule(&dev->tx_worker);
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_update_channel(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ return mt76_set_channel(phy, chandef, offchannel);
+}
+EXPORT_SYMBOL_GPL(mt76_update_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
@@ -1484,21 +1509,32 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ enum mt76_sta_event ev;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
- if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- dev->drv->sta_assoc)
- dev->drv->sta_assoc(dev, vif, sta);
-
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
- return 0;
+ if (!dev->drv->sta_event)
+ return 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
@@ -1521,6 +1557,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid)
{
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
+ skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
@@ -1529,7 +1566,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
+ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
struct ieee80211_hw *hw;
struct sk_buff_head list;
struct sk_buff *skb;
@@ -1697,14 +1734,15 @@ int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
+ bool is_2g = sband->band == NL80211_BAND_2GHZ;
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband != &dev->phy.sband_2g.sband)
+ if (!is_2g)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->phy.sband_2g.sband) {
+ } else if (is_2g) {
offset = 4;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index a8cafa39a56d..98da82b74094 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -73,6 +73,8 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp,
struct sk_buff **ret_skb)
{
+ unsigned int retry = 0;
+ struct sk_buff *orig_skb = NULL;
unsigned long expires;
int ret, seq;
@@ -81,6 +83,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);
+ if (dev->mcu_ops->mcu_skb_prepare_msg) {
+ ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
+ if (ret < 0)
+ goto out;
+ }
+
+retry:
+ orig_skb = skb_get(skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
@@ -94,6 +104,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
do {
skb = mt76_mcu_get_response(dev, expires);
+ if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
+ retry++ < dev->mcu_ops->max_retry) {
+ dev_err(dev->dev, "Retry message %08x (seq %d)\n",
+ cmd, seq);
+ skb = orig_skb;
+ goto retry;
+ }
+
ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq);
if (!ret && ret_skb)
*ret_skb = skb;
@@ -101,7 +119,9 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
+
out:
+ dev_kfree_skb(orig_skb);
mutex_unlock(&dev->mcu.mutex);
return ret;
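The retry support added above hinges on skb reference counting: skb_get() takes an extra reference before each send attempt, so the consuming send path can free its copy while orig_skb stays valid for retransmission, and the single dev_kfree_skb(orig_skb) at the out label drops whichever reference remains. A per-attempt trace of the balance as I read the hunk:

	/*
	 * retry:  orig_skb = skb_get(skb);  refs: 2 (ours + send path's)
	 *         mcu_skb_send_msg(skb);    send path consumes its ref
	 *         timeout -> skb = orig_skb; our ref seeds the next attempt,
	 *                                    and skb_get() runs again
	 * out:    dev_kfree_skb(orig_skb);  drop the last reference
	 */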
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4a58a78d5ed2..0b75a45ad2e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,8 +162,8 @@ enum mt76_dfs_state {
struct mt76_queue_buf {
dma_addr_t addr;
- u16 len;
- bool skip_unmap;
+ u16 len:15,
+ skip_unmap:1;
};
struct mt76_tx_info {
@@ -230,11 +230,14 @@ struct mt76_queue {
};
struct mt76_mcu_ops {
+ unsigned int max_retry;
u32 headroom;
u32 tailroom;
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *seq);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, int *seq);
int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
@@ -347,6 +350,7 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
+ u8 sta_disabled:1;
u8 amsdu:1;
u8 phy_idx:2;
u8 link_id:4;
@@ -361,6 +365,7 @@ struct mt76_wcid {
struct list_head tx_list;
struct sk_buff_head tx_pending;
+ struct sk_buff_head tx_offchannel;
struct list_head list;
struct idr pktid;
@@ -466,6 +471,12 @@ enum {
MT76_STATE_WED_RESET,
};
+enum mt76_sta_event {
+ MT76_STA_EVENT_ASSOC,
+ MT76_STA_EVENT_AUTHORIZE,
+ MT76_STA_EVENT_DISASSOC,
+};
+
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
@@ -487,6 +498,7 @@ struct mt76_driver_ops {
u8 mcs_rates;
void (*update_survey)(struct mt76_phy *phy);
+ int (*set_channel)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -511,8 +523,8 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -768,6 +780,7 @@ struct mt76_phy {
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ bool offchannel;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -1370,7 +1383,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
enum ieee80211_frame_release_type reason,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
-void mt76_set_channel(struct mt76_phy *phy);
+int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
@@ -1484,6 +1497,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index c223f7c19e6d..6457ee06bb5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -107,7 +107,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i, nframes;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
data.dev = dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index ea017f22fff2..863e5770df51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
- u8 tid = 0, hwq = 0;
+ u8 qid, tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
@@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TAG1D_MASK;
- u8 qid = tid_to_ac[tid];
+ qid = tid_to_ac[tid];
hwq = wmm_queue_map[qid];
skb_set_queue_mapping(skb, qid);
} else if (ieee80211_is_data(hdr->frame_control)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index d951cb81df83..f5a6b03bc61d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -181,6 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
is_mt7688(dev))
dev->mphy.antenna_mask = 1;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
mt76_eeprom_override(&dev->mphy);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 6c55c72f28a2..86617a3e4328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -15,9 +15,10 @@ const struct mt76_driver_ops mt7603_drv_ops = {
.rx_poll_complete = mt7603_rx_poll_complete,
.sta_ps = mt7603_sta_ps,
.sta_add = mt7603_sta_add,
- .sta_assoc = mt7603_sta_assoc,
+ .sta_event = mt7603_sta_event,
.sta_remove = mt7603_sta_remove,
.update_survey = mt7603_update_channel,
+ .set_channel = mt7603_set_channel,
};
static void
@@ -456,11 +457,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+ u8 ext_pa_pwr;
int max_offset, cur_offset;
int i;
- if (ext_pa && is_mt7603(dev))
- target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+ ext_pa_pwr = eeprom[MT_EE_TX_POWER_TSSI_OFF];
+ if (ext_pa && is_mt7603(dev) && ext_pa_pwr != 0 && ext_pa_pwr != 0xff)
+ target_power = ext_pa_pwr & ~BIT(7);
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index f35fa643c0da..574f74ad325d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -133,30 +133,24 @@ void mt7603_init_edcca(struct mt7603_dev *dev)
mt7603_edcca_set_strict(dev, false);
}
-static int
-mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
+int mt7603_set_channel(struct mt76_phy *mphy)
{
- struct mt7603_dev *dev = hw->priv;
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
+ struct cfg80211_chan_def *def = &mphy->chandef;
+
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
- ieee80211_stop_queues(hw);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -180,10 +174,6 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_txq_schedule_all(&dev->mphy);
-
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -199,17 +189,14 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
- if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ if (!mphy->offchannel)
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
- mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
if (failed)
mt7603_mac_work(&dev->mphy.mac_work.work);
- ieee80211_wake_queues(hw);
-
return ret;
}
@@ -227,7 +214,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
if (err)
return err;
- return mt7603_set_channel(hw, &mphy->chandef);
+ return mt76_update_channel(mphy);
}
static int
@@ -238,7 +225,7 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
- ret = mt7603_set_channel(hw, &hw->conf.chandef);
+ ret = mt76_update_channel(&dev->mphy);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
@@ -368,13 +355,19 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return ret;
}
-void
-mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int
+mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
- mt7603_wtbl_update_cap(dev, sta);
+ if (ev == MT76_STA_EVENT_ASSOC) {
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_wtbl_update_cap(dev, sta);
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return 0;
}
void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 9e58df7042ad..55a034ccbacd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -213,6 +213,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
+int mt7603_set_channel(struct mt76_phy *mphy);
int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);
@@ -245,8 +246,8 @@ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index f7722f67db57..66ba3be27343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
+
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
mt7615_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
@@ -319,7 +322,7 @@ void mt7615_init_work(struct mt7615_dev *dev)
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
mt7615_check_offload_capability(dev);
}
EXPORT_SYMBOL_GPL(mt7615_init_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 50e262c1622f..376975388007 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -282,19 +282,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
}
-int mt7615_set_channel(struct mt7615_phy *phy)
+int mt7615_set_channel(struct mt76_phy *mphy)
{
+ struct mt7615_phy *phy = mphy->priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt7615_mutex_acquire(dev);
-
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
+ mt76_connac_pm_wake(mphy, &dev->pm);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
ret = mt7615_mcu_apply_rx_dcoc(phy);
@@ -325,11 +320,8 @@ int mt7615_set_channel(struct mt7615_phy *phy)
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt7615_mutex_release(dev);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76)) {
unsigned long timeout = mt7615_get_macwork_timeout(dev);
@@ -339,6 +331,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_set_channel);
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -425,11 +418,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
if (mt7615_firmware_offload(phy->dev))
return mt76_connac_mcu_set_rate_txpower(phy->mt76);
- ieee80211_stop_queues(hw);
- err = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
-
- return err;
+ return mt76_update_channel(phy->mt76);
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@@ -448,9 +437,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt7615_mutex_release(dev);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
+ ret = mt76_update_channel(phy->mt76);
}
mt7615_mutex_acquire(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index d50d967828be..96e34277fece 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -394,7 +394,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
return;
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -847,6 +847,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
bool new_entry = true;
+ int conn_state;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -863,8 +864,9 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- enable, new_entry);
+ conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1878,16 +1880,6 @@ out:
sizeof(req), true);
}
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
-{
- struct wtbl_req_hdr req = {
- .operation = WTBL_RESET_ALL,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2151,7 +2143,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 87a956ea3ad7..dbb2c82407df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -182,6 +182,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
struct mt76_bus_ops *bus_ops;
struct ieee80211_ops *ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index a20322aae967..530da48ce3ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -399,7 +399,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *rates);
void mt7615_pm_wake_work(struct work_struct *work);
void mt7615_pm_power_save_work(struct work_struct *work);
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
@@ -457,7 +456,7 @@ void mt7615_roc_work(struct work_struct *work);
void mt7615_roc_timer(struct timer_list *timer);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
-int mt7615_set_channel(struct mt7615_phy *phy);
+int mt7615_set_channel(struct mt76_phy *mphy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 9692890ba51b..aebfc4576aa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -87,6 +87,7 @@ static int mt7663s_probe(struct sdio_func *func,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index a3d1cfa729ed..03f5af84424b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -141,7 +141,7 @@ mt7615_tm_init(struct mt7615_phy *phy)
mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF);
mutex_unlock(&dev->mt76.mutex);
- mt7615_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 9335ca0776fe..5020af52c68c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -123,6 +123,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static struct mt76_bus_ops bus_ops = {
.rr = mt7663u_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 5f132115ebfc..eb4765365b8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -355,4 +355,11 @@ enum tx_port_idx {
MT_TX_PORT_IDX_MCU
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#endif /* __MT76_CONNAC2_MAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 353e66069840..db0c29e65185 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -28,8 +28,6 @@ enum {
#define MT_RXD0_MESH BIT(18)
#define MT_RXD0_MHCP BIT(19)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
-#define MT_RXD0_NORMAL_IP_SUM BIT(23)
-#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16)
#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F
@@ -80,6 +78,8 @@ enum {
#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
#define MT_RXD3_NORMAL_CO_ANT BIT(22)
#define MT_RXD3_NORMAL_FCS_ERR BIT(24)
+#define MT_RXD3_NORMAL_IP_SUM BIT(26)
+#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27)
#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD DW4 */
@@ -197,6 +197,13 @@ enum tx_mgnt_type {
MT_TX_ADDBA,
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#define MT_CT_INFO_APPLY_TXD BIT(0)
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
#define MT_CT_INFO_MGMT_FRAME BIT(2)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index b841bf628d02..a3db65254e37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -391,6 +391,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control;
+ __le16 sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -432,6 +433,13 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+
txwi[2] |= cpu_to_le32(val);
if (ieee80211_is_beacon(fc)) {
@@ -440,7 +448,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
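The three-way chain added to the txwi writer encodes a truth table over (more-fragments, first-fragment), with the unfragmented case falling through to MT_TX_FRAG_NONE (value 0, so no bits are set). A hypothetical helper making the mapping explicit:

	enum demo_tx_frag_idx {
		DEMO_TX_FRAG_NONE,	/* unfragmented frame */
		DEMO_TX_FRAG_FIRST,
		DEMO_TX_FRAG_MID,
		DEMO_TX_FRAG_LAST,
	};

	static enum demo_tx_frag_idx demo_frag_idx(int more_frags, int first_frag)
	{
		if (more_frags)
			return first_frag ? DEMO_TX_FRAG_FIRST : DEMO_TX_FRAG_MID;
		return first_frag ? DEMO_TX_FRAG_NONE : DEMO_TX_FRAG_LAST;
	}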
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 4dce03ddbfa4..864246f94088 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (wcid && !wcid->sta)
+ if (wcid && !wcid->sta && !wcid->sta_disabled)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly)
+ int conn_state, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@@ -382,13 +382,9 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic = (struct sta_rec_basic *)tlv;
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
- if (enable) {
- if (newly)
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
+ if (newly && conn_state != CONN_STATE_DISCONNECT)
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = conn_state;
if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
@@ -1051,15 +1047,18 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
+ int conn_state;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
- link_sta, info->enable,
+ link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
@@ -2850,6 +2849,17 @@ int mt76_connac_mcu_restart(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev)
+{
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(WTBL_UPDATE),
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_del_wtbl_all);
+
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 4242d436de26..1b0e80dfc346 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -115,21 +115,26 @@ struct mt76_connac2_mcu_uni_txd {
} __packed __aligned(4);
struct mt76_connac2_mcu_rxd {
- __le32 rxd[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(mt76_connac2_mcu_rxd_hdr, hdr,
+ __le32 rxd[6];
- __le16 len;
- __le16 pkt_type_id;
+ __le16 len;
+ __le16 pkt_type_id;
- u8 eid;
- u8 seq;
- u8 option;
- u8 rsv;
- u8 ext_eid;
- u8 rsv1[2];
- u8 s2d_index;
+ u8 eid;
+ u8 seq;
+ u8 option;
+ u8 rsv;
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+ );
u8 tlv[];
};
+static_assert(offsetof(struct mt76_connac2_mcu_rxd, tlv) == sizeof(struct mt76_connac2_mcu_rxd_hdr),
+ "struct member likely outside of struct_group_tagged()");
struct mt76_connac2_patch_hdr {
char build_date[16];
@@ -1898,7 +1903,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly);
+ int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -2032,6 +2037,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
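struct_group_tagged() (from include/linux/stddef.h) wraps the listed members in an anonymous union so they are addressable both as individual fields and as one named header struct, and the static_assert above then catches anyone adding a member between the group and the trailing flexible array. A simplified model of just the layout, with demo_* names of my own:

	#include <stddef.h>

	struct demo_rxd_hdr {			/* the "tagged" view */
		unsigned int rxd[6];
		unsigned char eid, seq;
	};

	struct demo_rxd {
		union {
			struct demo_rxd_hdr hdr;
			struct {		/* flat view: same members */
				unsigned int rxd[6];
				unsigned char eid, seq;
			};
		};
		unsigned char tlv[];		/* must begin right past hdr */
	};

	_Static_assert(offsetof(struct demo_rxd, tlv) == sizeof(struct demo_rxd_hdr),
		       "struct member likely outside of the grouped header");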
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 07380cce8755..4aa2dcedc874 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -8,16 +8,15 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
-static void
-mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x0_set_channel(struct mt76_phy *mphy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
+
mt76x02_pre_tbtt_enable(dev, false);
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mphy);
- mt76x0_phy_set_channel(dev, chandef);
+ mt76x0_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
@@ -28,8 +27,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_set_channel);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
@@ -61,13 +61,10 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x0_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
struct mt76_phy *mphy = &dev->mphy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 99dcb8feb9f7..50f755344968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -49,6 +49,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_set_channel(struct mt76_phy *mphy);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 2ecee7c5c80d..1eb955f3ca13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -159,6 +159,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 390f502e97f0..b031c500b741 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -217,6 +217,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 024a5c0a5a57..7a07636d09c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -630,7 +630,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) {
/* sw detector rx radar pattern */
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
@@ -658,7 +658,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 35b7ebc2c9c6..4a49a3036a46 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,7 +22,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 29b9a15f8dbe..0e1ede9314d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -188,10 +188,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
struct sk_buff *skb;
int nbeacons;
- if (!dev->mt76.beacon_mask)
- return;
-
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (!dev->mt76.beacon_mask || dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index be1217329a77..f051721bb00e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -47,6 +47,8 @@ void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+int mt76x2e_set_channel(struct mt76_phy *phy);
+int mt76x2u_set_channel(struct mt76_phy *phy);
void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
int mt76x2_phy_start(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 30959746e924..67c9d1caa0bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -25,6 +25,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2e_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 6accea551319..eb70130d2711 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -32,33 +32,25 @@ mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2_stop_hardware(dev);
}
-static void
-mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x2e_set_channel(struct mt76_phy *phy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(phy->dev, struct mt76x02_dev, mt76);
+
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, true);
- mt76x2_phy_set_channel(dev, chandef);
+ mt76x2_phy_set_channel(dev, &phy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
static int
@@ -95,11 +87,8 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x2_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e92bb871f231..e832ad53e239 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -32,6 +32,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2u_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ba0241c36672..83e7061b10e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -31,32 +31,20 @@ static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2u_stop_hw(dev);
}
-static int
-mt76x2u_set_channel(struct mt76x02_dev *dev,
- struct cfg80211_chan_def *chandef)
+int mt76x2u_set_channel(struct mt76_phy *mphy)
{
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
int err;
- cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, false);
- err = mt76x2u_phy_set_channel(dev, chandef);
+ err = mt76x2u_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -93,11 +81,8 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- err = mt76x2u_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return err;
}
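Both new callbacks receive only the generic struct mt76_phy and recover the driver-private device with container_of(). The same recovery in self-contained C, with illustrative stand-in types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mt76_dev_like { int state; };
	struct driver_dev { unsigned int chip; struct mt76_dev_like mt76; };

	int main(void)
	{
		struct driver_dev d = { .chip = 0x7612 };
		struct mt76_dev_like *m = &d.mt76;	/* what the callback is handed */
		struct driver_dev *back = container_of(m, struct driver_dev, mt76);

		printf("%x\n", back->chip);		/* 7612 */
		return 0;
	}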
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index a978f434dc5e..6bef96e3d2a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
@@ -398,6 +400,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
hw->max_tx_fragments = 4;
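The thermal-init hunk adds a missing NULL check: devm_kasprintf() returns NULL on allocation failure, and the unchecked name was previously passed straight to cooling-device registration. The same pattern in self-contained C, with asprintf() standing in for devm_kasprintf():

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>

	/* Any formatted-allocation helper can fail; check before use. */
	static int make_name(char **out, const char *base)
	{
		if (asprintf(out, "mt7915_%s", base) < 0) {
			*out = NULL;
			return -1;	/* -ENOMEM in the driver */
		}
		return 0;
	}

	int main(void)
	{
		char *name;

		if (make_name(&name, "phy0"))
			return 1;
		puts(name);
		free(name);
		return 0;
	}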
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 8008ce3fa6c7..cf77ce0c8759 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1448,6 +1448,7 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
dev->recovery.hw_full_reset = true;
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
@@ -1462,26 +1463,27 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (!mt7915_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (ext_phy)
- ieee80211_restart_hw(ext_phy->hw);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ while (!list_empty(&dev->mt76.sta_poll_list))
+ list_del_init(dev->mt76.sta_poll_list.next);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
- ieee80211_wake_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_wake_queues(ext_phy->hw);
+ memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ dev->mt76.vif_mask = 0;
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7915_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
if (ext_phy)
- ieee80211_queue_delayed_work(ext_phy->hw,
- &ext_phy->mac_work,
- MT7915_WATCHDOG_TIME);
+ ieee80211_restart_hw(ext_phy->hw);
}
/* system error recovery */
@@ -1537,12 +1539,14 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
- mutex_lock(&dev->mt76.mutex);
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_stop(&dev->mt76.mmio.wed);
@@ -1692,6 +1696,11 @@ void mt7915_reset(struct mt7915_dev *dev)
return;
}
+ if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ }
+
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
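Besides deferring ieee80211_restart_hw() until after the mutex is dropped, the full-reset path now forgets all station state: it drains the poll list, clears the WCID bitmap and vif mask, and re-allocates the global WCID entry. A self-contained illustration of the bitmap allocation being rebuilt; mt76_wcid_alloc() is assumed to behave like alloc_idx() below, and the table size is illustrative:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define N_WCID 288
	static uint32_t wcid_mask[(N_WCID + 31) / 32];

	static int alloc_idx(uint32_t *mask, int size)
	{
		for (int i = 0; i < size; i++)
			if (!(mask[i / 32] & (1u << (i % 32)))) {
				mask[i / 32] |= 1u << (i % 32);
				return i;
			}
		return -1;			/* table full */
	}

	int main(void)
	{
		memset(wcid_mask, 0, sizeof(wcid_mask));	/* full reset: drop all stations */
		printf("global wcid idx = %d\n", alloc_idx(wcid_mask, N_WCID));
		return 0;
	}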
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 049223df9beb..d75e8dea1fbd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -245,7 +245,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+ if (idx < 0)
+ return -ENOSPC;
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
@@ -272,7 +274,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
memset(&mvif->cap, -1, sizeof(mvif->cap));
mt7915_mcu_add_bss_info(phy, vif, true);
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -291,7 +293,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
int idx = msta->wcid.idx;
mt7915_mcu_add_bss_info(phy, vif, false);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, mvif->sta.wcid.idx);
mutex_lock(&dev->mt76.mutex);
mt76_testmode_reset(phy->mt76, true);
@@ -317,18 +320,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7915_set_channel(struct mt7915_phy *phy)
+int mt7915_set_channel(struct mt76_phy *mphy)
{
+ struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
@@ -347,11 +344,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@@ -374,6 +366,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
+
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
*/
@@ -464,11 +459,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7915_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
@@ -564,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
rxfilter = phy->rxfilter;
@@ -633,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
if (set_sta == 1)
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
@@ -668,7 +660,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 0)
mt7915_mcu_add_bss_info(phy, vif, false);
if (set_sta == 0)
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -706,7 +698,7 @@ mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = mt7915_mcu_add_bss_info(phy, vif, true);
if (err)
goto out;
- err = mt7915_mcu_add_sta(dev, vif, NULL, true);
+ err = mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -720,7 +712,7 @@ mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -743,8 +735,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
- int ret, idx;
- u32 addr;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -753,25 +744,61 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
INIT_LIST_HEAD(&msta->rc_list);
INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = ext_phy;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, true);
- ret = mt7915_mcu_add_sta(dev, vif, sta, true);
- if (ret)
- return ret;
+ return 0;
+}

- addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
- mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i, ret;
+ u32 addr;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
+ ret = mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 0;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+ return 0;
+ }
+
+ return 0;
}
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -779,16 +806,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- int i;
-
- mt7915_mcu_add_sta(dev, vif, sta, false);
mt7915_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7915_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -896,22 +917,6 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7915_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -1089,8 +1094,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (is_mt7915(&phy->dev->mt76) &&
- !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1164,6 +1168,10 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (!msta->wcid.sta)
+ return;
mt7915_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1207,6 +1215,9 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1223,6 +1234,9 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1579,6 +1593,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
}
static int
+mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ return 0;
+}
+
+static int
mt7915_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
@@ -1660,6 +1680,17 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
}
#endif
+static void
+mt7915_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
const struct ieee80211_ops mt7915_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1676,8 +1707,7 @@ const struct ieee80211_ops mt7915_ops = {
.bss_info_changed = mt7915_bss_info_changed,
.start_ap = mt7915_start_ap,
.stop_ap = mt7915_stop_ap,
- .sta_add = mt7915_sta_add,
- .sta_remove = mt7915_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.sta_rc_update = mt7915_sta_rc_update,
.set_key = mt7915_set_key,
@@ -1708,6 +1738,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,
.twt_teardown_request = mt7915_twt_teardown_request,
+ .set_frag_threshold = mt7915_set_frag_threshold,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1718,4 +1749,5 @@ const struct ieee80211_ops mt7915_ops = {
.net_fill_forward_path = mt7915_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .reconfig_complete = mt7915_reconfig_complete,
};
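The driver drops its sta_add/sta_remove shims in favour of the core's mt76_sta_state handler plus the new per-driver sta_event hook. A self-contained sketch of the presumed mapping from mac80211 state transitions to events; the enum names are illustrative, not the kernel's:

	#include <stdio.h>

	enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };
	enum sta_event { EV_NONE, EV_ASSOC, EV_AUTHORIZE, EV_DISASSOC };

	static enum sta_event transition_event(enum sta_state old_state,
					       enum sta_state new_state)
	{
		if (old_state == STA_AUTH && new_state == STA_ASSOC)
			return EV_ASSOC;
		if (old_state == STA_ASSOC && new_state == STA_AUTHORIZED)
			return EV_AUTHORIZE;
		if (old_state == STA_ASSOC && new_state == STA_AUTH)
			return EV_DISASSOC;
		return EV_NONE;
	}

	int main(void)
	{
		printf("%d\n", transition_event(STA_ASSOC, STA_AUTHORIZED));	/* 2 */
		return 0;
	}

DISASSOC is the interesting case: mt7915_mac_sta_event() tears down TWT flows and marks the WCID disabled there, rather than in sta_remove, so a later re-association reuses the entry cleanly.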
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2185cd24e2e1..87d0dd040001 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -157,12 +157,21 @@ static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
+
+ if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) {
+ dev->recovery.restart = true;
+ wake_up(&dev->mt76.mcu.wait);
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+
return -ETIMEDOUT;
}
@@ -191,11 +200,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
enum mt76_mcuq_id qid;
- int ret;
-
- ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
- if (ret)
- return ret;
if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
@@ -293,7 +297,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -690,13 +694,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
+ int ret;
+ mt76_worker_disable(&dev->mt76.tx_worker);
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
+ ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
+ mt76_worker_enable(&dev->mt76.tx_worker);
- return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- MCU_EXT_CMD(STA_REC_UPDATE),
- enable, true);
+ return ret;
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
@@ -1653,7 +1661,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_link_sta *link_sta;
@@ -1670,13 +1678,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
- if (!enable)
- goto out;
-
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ conn_state, newly);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (sta && conn_state != CONN_STATE_DISCONNECT) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -1687,12 +1692,17 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
- ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret) {
- dev_kfree_skb(skb);
- return ret;
+ if (newly || conn_state != CONN_STATE_DISCONNECT) {
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
+ if (conn_state == CONN_STATE_DISCONNECT)
+ goto out;
+
if (sta) {
/* starec amsdu */
mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
@@ -2352,6 +2362,8 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
+
if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
is_mt7915(&dev->mt76)) ||
!mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
@@ -2376,7 +2388,9 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .max_retry = 3,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
};
@@ -2747,7 +2761,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
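Two related MCU-robustness changes land in this file: a response timeout now kicks full-chip recovery via test_and_set_bit(MT76_MCU_RESET, ...), and message construction is split out into .mcu_skb_prepare_msg with .max_retry = 3, which presumably lets the core rebuild and resend a request before declaring the MCU dead. The control flow, reduced to self-contained C (send_once() merely simulates an answer):

	#include <stdbool.h>
	#include <stdio.h>

	static bool send_once(int attempt)
	{
		return attempt == 2;	/* pretend the MCU answers on the third try */
	}

	int main(void)
	{
		const int max_retry = 3;	/* mirrors .max_retry above */

		for (int attempt = 0; attempt <= max_retry; attempt++)
			if (send_once(attempt)) {
				printf("acked on attempt %d\n", attempt + 1);
				return 0;
			}

		printf("timeout: schedule full-chip recovery\n");
		return 1;
	}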
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index b41ac4aaced7..49476a4182fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -29,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -37,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 omac_idx;
u8 csa_count;
@@ -46,7 +46,7 @@ struct mt7915_mcu_csa_notify {
} __packed;
struct mt7915_mcu_bcc_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 omac_idx;
@@ -55,7 +55,7 @@ struct mt7915_mcu_bcc_notify {
} __packed;
struct mt7915_mcu_rdd_report {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 long_detected;
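The event structs switch from embedding the full RXD to a *_hdr type, presumably because the full mt76_connac2_mcu_rxd ends in a flexible array member, and a struct with a flexible tail must not be embedded in the middle of another struct. Illustrative types showing the split; these are not the kernel's definitions:

	struct rxd_hdr {			/* fixed-size prefix: safe to embed */
		unsigned int len;
		unsigned char seq, pkt_type;
	};

	struct rxd {
		struct rxd_hdr hdr;
		unsigned char tlv[];		/* flexible tail: must stay last */
	};

	struct thermal_notify {			/* mirrors mt7915_mcu_thermal_notify */
		struct rxd_hdr rxd;		/* fine */
		/* struct rxd rxd; here would put a flexible array mid-struct */
		unsigned int temperature;
	};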
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index d6ecd698cdcd..44e112b8b5b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -927,8 +927,10 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_add = mt7915_mac_sta_add,
+ .sta_event = mt7915_mac_sta_event,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
+ .set_channel = mt7915_set_channel,
};
struct mt7915_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index a30d08eb0656..ac0b1f0eb27c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -444,7 +444,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -463,7 +463,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt7915_set_channel(struct mt7915_phy *phy);
+int mt7915_set_channel(struct mt76_phy *mphy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
@@ -560,6 +560,8 @@ void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 0d76ae31b376..d534fff5c952 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -404,6 +404,7 @@ static void
mt7915_tm_init(struct mt7915_phy *phy, bool en)
{
struct mt7915_dev *dev = phy->dev;
+ int state;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
@@ -415,7 +416,8 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
- mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+ state = en ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, state, true);
if (!en)
mt7915_tm_set_tam_arb(phy, en, 0);
@@ -425,7 +427,7 @@ static void
mt7915_tm_update_channel(struct mt7915_phy *phy)
{
mutex_unlock(&phy->dev->mt76.mutex);
- mt7915_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mutex_lock(&phy->dev->mt76.mutex);
mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ef0c721d26e3..d1d64fa7d35d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7921_hwmon_groups);
@@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
}
/* UNII-4 */
- if (IS_UNII_INVALID(0, 5850, 5925))
+ if (IS_UNII_INVALID(0, 5845, 5925))
ch->flags |= IEEE80211_CHAN_DISABLED;
}
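The UNII-4 guard's lower bound moves from 5850 to 5845 MHz, presumably so the range also covers channel 169, whose centre frequency is 5845 MHz. Assuming IS_UNII_INVALID() tests a centre frequency against a segment, the effect looks like this (illustrative helper, not the driver's macro):

	#include <stdbool.h>

	/* With the old 5850 lower bound, channel 169 (centre 5845 MHz)
	 * slipped past the check and stayed enabled. */
	static bool in_unii4(int center_mhz)
	{
		return center_mhz >= 5845 && center_mhz <= 5925;
	}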
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 23b228804289..a7f5bfbc02ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -455,37 +455,30 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
return mt7921_abort_roc(phy, mvif);
}
-static int mt7921_set_channel(struct mt792x_phy *phy)
+int mt7921_set_channel(struct mt76_phy *mphy)
{
+ struct mt792x_phy *phy = mphy->priv;
struct mt792x_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt792x_mutex_acquire(dev);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
+ mt76_connac_pm_wake(mphy, &dev->pm);
ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
mt792x_mac_set_timeing(phy);
-
mt792x_mac_reset_counters(phy);
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mt792x_mutex_release(dev);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt76_worker_schedule(&dev->mt76.tx_worker);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
return ret;
}
+EXPORT_SYMBOL_GPL(mt7921_set_channel);
static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -620,11 +613,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7921_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
mt792x_mutex_acquire(dev);
@@ -831,13 +822,16 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_add);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -853,8 +847,10 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_event);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 394fcd799345..02c1de8620a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -890,7 +890,7 @@ int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
NL80211_IFTYPE_AP))
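As in the mt7915 and mt7996 hunks, scan detection switches from the global hw->conf.flags IEEE80211_CONF_OFFCHANNEL bit to the new per-phy offchannel flag, so each band of a multi-phy device decides independently whether DPD calibration can be skipped. The selection logic, reduced to self-contained C with illustrative names:

	#include <stdbool.h>

	enum switch_reason { SWITCH_NORMAL, SWITCH_SCAN_BYPASS_DPD };

	/* Off-channel visits and idle switches skip DPD calibration;
	 * only a real operating-channel switch pays for it. */
	static enum switch_reason pick_reason(bool monitor, bool offchannel, bool idle)
	{
		if (monitor)
			return SWITCH_NORMAL;
		if (offchannel || idle)
			return SWITCH_SCAN_BYPASS_DPD;
		return SWITCH_NORMAL;
	}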
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 6c5392c5d207..16c89815c0b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -186,6 +186,7 @@ int __mt7921_start(struct mt792x_phy *phy);
int mt7921_register_device(struct mt792x_dev *dev);
void mt7921_unregister_device(struct mt792x_dev *dev);
int mt7921_run_firmware(struct mt792x_dev *dev);
+int mt7921_set_channel(struct mt76_phy *mphy);
int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
@@ -244,8 +245,8 @@ int mt7921_mac_init(struct mt792x_dev *dev);
bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_reset_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index a7430216a80d..67723c22aea6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -244,9 +244,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 004d942ee11a..95f526f7bb99 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -100,9 +100,10 @@ static int mt7921s_probe(struct sdio_func *func,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt76_bus_ops mt7921s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8b7c03c47598..8aa4f0203208 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -151,9 +151,10 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops hif_ops = {
.mcu_init = mt7921u_mcu_init,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index cf36750cf709..634c42bbf23f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
bool hdr_trans, unicast, insert_ccmp_hdr = false;
u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
@@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt792x_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
u32 csum_status = *(u32 *)skb->cb;
- u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
@@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
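The checksum-offload fix is the same on mt7925 and mt7996: the MT_RXD3_NORMAL_* "checksum done" bits live in RX descriptor word 3, but the old code tested identically positioned MT_RXD0_* bits in word 0, so unrelated fields decided whether CHECKSUM_UNNECESSARY was set. In miniature, with illustrative bit positions:

	#include <stdbool.h>
	#include <stdint.h>

	#define IP_SUM	(1u << 4)
	#define L4_SUM	(1u << 5)

	static bool csum_ok(const uint32_t *rxd)
	{
		uint32_t mask = IP_SUM | L4_SUM;

		return (rxd[3] & mask) == mask;	/* word 3, not rxd[0] */
	}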
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8c0768bf9343..791c8b00e112 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
+void mt7925_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync);
+
void mt7925_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1078,23 +1091,26 @@ static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
mt792x_mutex_release(dev);
}
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
+ struct ieee80211_link_sta *link_sta = &sta->deflink;
+
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
if (ieee80211_vif_is_mld(vif)) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct ieee80211_link_sta *link_sta;
link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
-
mt7925_mac_set_links(mdev, vif);
-
- mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
- } else {
- mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
}
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_event);
static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct ieee80211_vif *vif,
@@ -1109,6 +1125,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_id);
+ mt7925_roc_abort_sync(dev);
+
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 9dc22fbe25d3..748ea6adbc6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
+ if (clc->idx > ARRAY_SIZE(phy->clc))
+ break;
+
/* do not init buf again if chip reset triggered */
if (phy->clc[clc->idx])
continue;
@@ -1770,16 +1773,19 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
+ int conn_state;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
if (info->link_sta)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
info->link_sta,
- info->enable, info->newly);
+ conn_state, info->newly);
if (info->link_sta && info->enable) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
@@ -2171,12 +2177,12 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
req = (struct bss_rlm_tlv *)tlv;
- req->control_channel = chandef->chan->hw_value,
- req->center_chan = ieee80211_frequency_to_channel(freq1),
- req->center_chan2 = ieee80211_frequency_to_channel(freq2),
- req->tx_streams = hweight8(phy->antenna_mask),
- req->ht_op_info = 4, /* set HT 40M allowed */
- req->rx_streams = hweight8(phy->antenna_mask),
+ req->control_channel = chandef->chan->hw_value;
+ req->center_chan = ieee80211_frequency_to_channel(freq1);
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->tx_streams = hweight8(phy->antenna_mask);
+ req->ht_op_info = 4; /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask);
req->band = band;
switch (chandef->width) {
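The bss_rlm_tlv hunk is a pure correctness-of-style fix: the assignments were terminated with commas, which the comma operator silently fuses into one expression statement. That happens to behave identically here, but the construct breaks as soon as control flow is added around it, as this self-contained example shows:

	#include <stdio.h>

	int main(void)
	{
		int a, b;

		a = 1,			/* one statement ... */
		b = 2;			/* ... ending here */

		if (a)
			a = 3,		/* both assignments guarded by the if! */
			b = 4;

		printf("%d %d\n", a, b);	/* prints "3 4" */
		return 0;
	}

With the commas, both assignments under the if are guarded together even though only the first appears to be; the semicolon form removes the trap.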
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 669f3a079d04..f5c02e5f5066 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -219,8 +219,8 @@ int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7925_mac_reset_work(struct work_struct *work);
@@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
enum mt7925_roc_req type, u8 token_id);
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
+void mt7925_roc_abort_sync(struct mt792x_dev *dev);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 6e4f4e78c350..9aec675450f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -279,7 +279,7 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7925_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
@@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7925_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 1e0f094fc905..682db1bab21c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -142,7 +142,7 @@ static int mt7925u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7925_queue_rx_skb,
.rx_check = mt7925_rx_check,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 7fa74d59cc48..ab12616ec2b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -68,7 +68,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
- MT792x_CLC_CHAN,
+ MT792x_CLC_POWER_EXT,
MT792x_CLC_MAX_NUM,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 283df84f1b43..5e96973226bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -42,6 +42,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
}
};
@@ -941,8 +942,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if (is_mt7996(phy->mt76->dev))
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -987,9 +992,15 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
- c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
+
+ if (is_mt7996(phy->mt76->dev))
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@@ -1011,8 +1022,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- if (vif == NL80211_IFTYPE_AP)
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
@@ -1020,6 +1029,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
sts - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
@@ -1179,12 +1193,12 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] =
- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
- val = max_t(u8, sts - 1, 3);
+ /* Set the maximum capability regardless of the antenna configuration. */
+ val = is_mt7992(phy->mt76->dev) ? 4 : 3;
eht_cap_elem->phy_cap_info[0] |=
u8_encode_bits(u8_get_bits(val, BIT(0)),
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
@@ -1193,30 +1207,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+
+ if (band == NL80211_BAND_6GHZ) {
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ }
eht_cap_elem->phy_cap_info[3] =
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
@@ -1230,14 +1250,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] =
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
-
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
#define SET_EHT_MAX_NSS(_bw, _val) do { \
@@ -1248,8 +1260,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val);
SET_EHT_MAX_NSS(160, val);
- SET_EHT_MAX_NSS(320, val);
+ if (band == NL80211_BAND_6GHZ)
+ SET_EHT_MAX_NSS(320, val);
#undef SET_EHT_MAX_NSS
+
+ if (iftype != NL80211_IFTYPE_AP)
+ return;
+
+ eht_cap_elem->phy_cap_info[3] |=
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
+
+ if (band != NL80211_BAND_6GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index bc7111a71f98..0d21414e2c88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
u32 csum_status = *(u32 *)skb->cb;
u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
@@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask &&
+ if ((rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -746,7 +746,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
+ __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -780,6 +780,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+ else
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
+
txwi[2] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
@@ -789,7 +798,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
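This is the mt7996 half of the SUPPORTS_TX_FRAG support enabled for mt7915 earlier in the series: each MPDU must be tagged first/middle/last/none in TXD2, derived from the More Fragments flag and the fragment-number field of the sequence control word. The same decision table in self-contained C:

	#include <stdbool.h>
	#include <stdint.h>

	enum frag { FRAG_NONE, FRAG_FIRST, FRAG_MID, FRAG_LAST };

	static enum frag frag_type(bool morefrags, uint16_t seq_ctrl)
	{
		bool first = (seq_ctrl & 0x000f) == 0;	/* fragment number 0 */

		if (morefrags)
			return first ? FRAG_FIRST : FRAG_MID;
		return first ? FRAG_NONE : FRAG_LAST;
	}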
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index bce082038219..39f071ece35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true);
if (ret)
@@ -291,22 +291,19 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7996_set_channel(struct mt7996_phy *phy)
+int mt7996_set_channel(struct mt76_phy *mphy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_phy *phy = mphy->priv;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
if (ret)
goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -314,13 +311,7 @@ int mt7996_set_channel(struct mt7996_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
- ieee80211_queue_delayed_work(phy->mt76->hw,
- &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7996_WATCHDOG_TIME);
return ret;
@@ -360,14 +351,14 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- fallthrough;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7)
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
+ }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -411,11 +402,9 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7996_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2e4fa9f48dfb..6c445a9dbc03 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -371,7 +371,7 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv = skb_put(skb, len);
+ struct tlv *ptlv = skb_put_zero(skb, len);
ptlv->tag = cpu_to_le16(tag);
ptlv->len = cpu_to_le16(len);
@@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
+ if (!vif->bss_conf.bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
if (bfee)
return vif->bss_conf.eht_su_beamformee &&
- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
return vif->bss_conf.eht_su_beamformer &&
- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
if (sta->deflink.he_cap.has_he) {
@@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
u8 snd_dim, sts;
+ if (!vc)
+ return;
+
bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf);
@@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
static const u8 matrix[4][4] = {
@@ -2160,6 +2163,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
+ int conn_state;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
@@ -2172,8 +2176,9 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
- enable, newly);
+ conn_state, newly);
if (!enable)
goto out;
@@ -3460,7 +3465,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
@@ -3923,8 +3928,9 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
req_mod_en = (struct bf_mod_en_ctrl *)tlv;
- req_mod_en->bf_num = 3;
- req_mod_en->bf_bitmap = GENMASK(2, 0);
+ req_mod_en->bf_num = mt7996_band_valid(dev, MT_BAND2) ? 3 : 2;
+ req_mod_en->bf_bitmap = mt7996_band_valid(dev, MT_BAND2) ?
+ GENMASK(2, 0) : GENMASK(1, 0);
break;
}
default:
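Note the EHT sounding fix buried in mt7996_is_ebf_supported(): for the beamformee path the peer must advertise SU beamformer capability (and vice versa), while the old code paired like roles, enabling explicit beamforming against peers that can never initiate sounding. The corrected role pairing in miniature:

	#include <stdbool.h>

	/* Our beamformee needs the peer to be a beamformer, and our
	 * beamformer needs the peer to be a beamformee. */
	static bool ebf_ok(bool bfee, bool own_bfee, bool own_bfer,
			   bool peer_bfer, bool peer_bfee)
	{
		if (bfee)
			return own_bfee && peer_bfer;
		return own_bfer && peer_bfee;
	}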
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 928a9663b49e..40e45fb2b626 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -620,6 +620,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.sta_add = mt7996_mac_sta_add,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
+ .set_channel = mt7996_set_channel,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 177cfff31120..ab8c9070630b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -468,7 +468,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd);
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
-int mt7996_set_channel(struct mt7996_phy *phy);
+int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5cf6edee4d13..ce193e625666 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -313,6 +313,9 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!wcid->sta)
+ return idx;
+
q->entry[idx].wcid = wcid->idx;
if (!non_aql)
@@ -330,6 +333,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff_head *head;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +349,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
- spin_lock_bh(&wcid->tx_pending.lock);
- __skb_queue_tail(&wcid->tx_pending, skb);
- spin_unlock_bh(&wcid->tx_pending.lock);
+ if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+ (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ head = &wcid->tx_offchannel;
+ else
+ head = &wcid->tx_pending;
+
+ spin_lock_bh(&head->lock);
+ __skb_queue_tail(head, skb);
+ spin_unlock_bh(&head->lock);
spin_lock_bh(&phy->tx_lock);
if (list_empty(&wcid->tx_list))
@@ -478,7 +488,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@@ -522,7 +532,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +578,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4)
+ if (qid >= 4 || phy->offchannel)
return;
local_bh_disable();
@@ -586,7 +596,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ struct sk_buff_head *head)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_sta *sta;
@@ -594,8 +605,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
struct sk_buff *skb;
int ret = 0;
- spin_lock(&wcid->tx_pending.lock);
- while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+ spin_lock(&head->lock);
+ while ((skb = skb_peek(head)) != NULL) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int qid = skb_get_queue_mapping(skb);
@@ -607,13 +618,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
- if (mt76_txq_stopped(q)) {
+ if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
ret = -1;
break;
}
- __skb_unlink(skb, &wcid->tx_pending);
- spin_unlock(&wcid->tx_pending.lock);
+ __skb_unlink(skb, head);
+ spin_unlock(&head->lock);
sta = wcid_to_sta(wcid);
spin_lock(&q->lock);
@@ -621,15 +632,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
dev->queue_ops->kick(dev, q);
spin_unlock(&q->lock);
- spin_lock(&wcid->tx_pending.lock);
+ spin_lock(&head->lock);
}
- spin_unlock(&wcid->tx_pending.lock);
+ spin_unlock(&head->lock);
return ret;
}
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
+ LIST_HEAD(tx_list);
+
if (list_empty(&phy->tx_list))
return;
@@ -637,22 +650,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
rcu_read_lock();
spin_lock(&phy->tx_lock);
- while (!list_empty(&phy->tx_list)) {
- struct mt76_wcid *wcid = NULL;
+ list_splice_init(&phy->tx_list, &tx_list);
+ while (!list_empty(&tx_list)) {
+ struct mt76_wcid *wcid;
int ret;
- wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+ wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
list_del_init(&wcid->tx_list);
spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ if (ret >= 0 && !phy->offchannel)
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
spin_lock(&phy->tx_lock);
- if (ret) {
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
+ if ((!skb_queue_empty(&wcid->tx_pending) ||
+      !skb_queue_empty(&wcid->tx_offchannel)) &&
+     list_empty(&wcid->tx_list))
+ list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+ if (ret < 0)
break;
- }
}
spin_unlock(&phy->tx_lock);
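
The tx path now keeps two per-wcid queues: tx_offchannel for frames that must go out during a scan or remain-on-channel slice, and tx_pending for normal traffic, which is skipped while the phy is off channel. A wcid has to stay on the phy's tx_list while either queue still holds frames, hence the or-condition in the requeue test above. A compact userspace model of the queue selection and requeue logic, using stand-in types rather than the driver's structs:

#include <stdbool.h>
#include <stdio.h>

struct queue { int len; };		/* stand-in for sk_buff_head */

struct wcid {
	struct queue tx_pending;	/* normal traffic */
	struct queue tx_offchannel;	/* scan/ROC traffic */
	bool on_tx_list;
};

/* Pick the queue the way mt76_tx() does: off-channel frames take a
 * separate queue so they are not stalled while phy->offchannel is set. */
static struct queue *pick_queue(struct wcid *w, bool offchan_frame)
{
	return offchan_frame ? &w->tx_offchannel : &w->tx_pending;
}

static void requeue_if_backlogged(struct wcid *w)
{
	/* A backlog on either queue keeps the wcid scheduled. */
	if ((w->tx_pending.len || w->tx_offchannel.len) && !w->on_tx_list)
		w->on_tx_list = true;
}

int main(void)
{
	struct wcid w = { .tx_pending = { 3 } };

	pick_queue(&w, true)->len++;	/* enqueue one off-channel frame */
	requeue_if_backlogged(&w);
	printf("on_tx_list=%d\n", w.on_tx_list);	/* 1 */
	return 0;
}
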
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 3c48e1a57b24..bba53307b960 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct wilc_join_bss_param *param;
u8 rates_len = 0;
int ies_len;
+ u64 ies_tsf;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
return NULL;
}
ies_len = ies->len;
+ ies_tsf = ies->tsf;
rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval);
@@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
- param->tsf_lo = cpu_to_le32(ies->tsf);
+ param->tsf_lo = cpu_to_le32(ies_tsf);
param->noa_enabled = 1;
param->idx = noa_attr.index;
if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
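
The wilc1000 change fixes a use-after-free: bss->ies is RCU-protected, so every field needed later must be copied to locals before rcu_read_unlock(), whereas the old code re-read ies->tsf after the unlock. A kernel-style sketch of the snapshot pattern (not a standalone program; field names follow struct cfg80211_bss_ies):

	const struct cfg80211_bss_ies *ies;
	u64 ies_tsf;
	int ies_len;

	rcu_read_lock();
	ies = rcu_dereference(bss->ies);
	if (!ies) {
		rcu_read_unlock();
		return NULL;
	}
	/* Snapshot everything needed while the pointer is still valid. */
	ies_len = ies->len;
	ies_tsf = ies->tsf;
	rcu_read_unlock();
	/* Only the local copies may be used from here on. */
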
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 683a35c682a8..b4da05d5a498 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -174,19 +174,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->bus_data = sdio_priv;
wilc->dev = &func->dev;
- wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&func->card->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto dispose_irq;
}
- clk_prepare_enable(wilc->rtc_clk);
wilc_sdio_init(wilc, false);
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
wilc_sdio_deinit(wilc);
@@ -195,14 +194,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
-clk_disable_unprepare:
- clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -217,7 +214,6 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc *wilc = sdio_get_drvdata(func);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 5ff940c53ad9..05b577b1068e 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -228,12 +228,11 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret < 0)
goto netdev_cleanup;
- wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto netdev_cleanup;
}
- clk_prepare_enable(wilc->rtc_clk);
dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
@@ -266,7 +265,6 @@ static int wilc_bus_probe(struct spi_device *spi)
return 0;
power_down:
- clk_disable_unprepare(wilc->rtc_clk);
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
@@ -280,7 +278,6 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
}
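
Both the SDIO and SPI hunks rely on the same simplification: devm_clk_get_optional_enabled() folds clk acquisition, clk_prepare_enable() and the matching disable/unprepare into one device-managed call, which is why the probe error paths and the remove functions lose their clk_disable_unprepare() calls. A hedged kernel-style sketch of the before/after shape in a generic probe (not standalone code):

	/* Before: acquire, then enable by hand; every error path and the
	 * remove() callback must call clk_disable_unprepare(). */
	clk = devm_clk_get_optional(dev, "rtc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_prepare_enable(clk);

	/* After: one devres-managed call; the clock is disabled,
	 * unprepared and put automatically when the device unbinds,
	 * so the manual unwind code simply disappears. */
	clk = devm_clk_get_optional_enabled(dev, "rtc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
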
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 663d77770fce..8b97accf6638 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -837,7 +837,7 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 76b07db284f8..71840f41b73c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -520,21 +520,21 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
break;
case QLINK_RADAR_CAC_FINISHED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_ABORTED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_STARTED:
- if (vif->wdev.cac_started)
+ if (vif->wdev.links[0].cac_started)
break;
if (!wiphy_ext_feature_isset(wiphy,
@@ -542,7 +542,7 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_STARTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_STARTED, GFP_KERNEL, 0);
break;
default:
pr_warn("%s: unhandled radar event %u\n",
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index de3332eb7a22..a99776af56c2 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 table_case, tdma_case;
- bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
- if (wl_cpt_test) {
- if (coex_stat->wl_gl_busy) {
- table_case = 20;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 15;
- }
- } else if (bt_cpt_test) {
- table_case = 26;
- tdma_case = 26;
- } else {
- if (coex_stat->wl_gl_busy &&
- coex_stat->wl_noisy_level == 0)
- table_case = 14;
- else
- table_case = 10;
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
- if (coex_stat->wl_gl_busy)
- tdma_case = 15;
- else
- tdma_case = 20;
- }
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
} else {
/* Non-Shared-Ant */
table_case = 112;
@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
tdma_case = 120;
}
- if (wl_cpt_test)
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]);
- else
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
-
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index ab7d414d0ba6..b9b0114e253b 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
- bckp[1] = val;
- val &= ~(BIT_EN_BCNQ_DL >> 16);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
if (ret) {
@@ -1496,7 +1498,8 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 63326b352738..b39e90fb66b4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -167,6 +167,12 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtwvif->mac_id = rtw_acquire_macid(rtwdev);
+ if (rtwvif->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ mutex_unlock(&rtwdev->mutex);
+ return -ENOSPC;
+ }
+
port = find_first_zero_bit(rtwdev->hw_port, RTW_PORT_NUM);
if (port >= RTW_PORT_NUM) {
mutex_unlock(&rtwdev->mutex);
@@ -214,7 +220,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
return 0;
}
@@ -225,7 +232,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,6 +250,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
clear_bit(rtwvif->port, rtwdev->hw_port);
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index b0c9b0ff7017..bbdef38c7e34 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -311,17 +311,6 @@ static void rtw_ips_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
static void rtw_sta_rc_work(struct work_struct *work)
{
struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
@@ -340,12 +329,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
int i;
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
- return -ENOSPC;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ si->mac_id = rtwvif->mac_id;
+ } else {
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+ }
- if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc == 0)
- rtwvif->mac_id = si->mac_id;
si->rtwdev = rtwdev;
si->sta = sta;
si->vif = vif;
@@ -370,11 +361,13 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist)
{
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct ieee80211_vif *vif = si->vif;
int i;
cancel_work_sync(&si->rc_work);
- rtw_release_macid(rtwdev, si->mac_id);
+ if (vif->type != NL80211_IFTYPE_STATION)
+ rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
@@ -614,6 +607,8 @@ static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw_bf_disassoc(rtwdev, vif, NULL);
rtw_vif_assoc_changed(rtwvif, NULL);
rtw_txq_cleanup(rtwdev, vif->txq);
+
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
}
void rtw_fw_recovery(struct rtw_dev *rtwdev)
@@ -2139,7 +2134,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
rtwdev->dm_info.fix_rate = U8_MAX;
- set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
rtw_stats_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 12b564ad3a58..945117afe143 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -742,7 +742,6 @@ struct rtw_txq {
unsigned long flags;
};
-#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
@@ -805,7 +804,7 @@ struct rtw_bf_info {
struct rtw_vif {
enum rtw_net_type net_type;
u16 aid;
- u8 mac_id; /* for STA mode only */
+ u8 mac_id;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
@@ -2131,6 +2130,17 @@ static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev)
return rtwdev->chip->tx_stbc;
}
+static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
{
clear_bit(mac_id, rtwdev->mac_id_map);
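
rtw_acquire_macid()/rtw_release_macid() form a classic bitmap ID allocator: find_first_zero_bit() picks the lowest free slot, set_bit() claims it, clear_bit() frees it; moving the pair into main.h lets the mac80211 callbacks in mac80211.c allocate a per-vif mac_id as well. A runnable userspace model of the same pattern over a single 64-bit word (capacity is hypothetical; rtw88 uses RTW_MAX_MAC_ID_NUM):

#include <stdio.h>
#include <stdint.h>

#define MAX_IDS 64	/* hypothetical capacity */

static uint64_t id_map;

static int acquire_id(void)
{
	for (int i = 0; i < MAX_IDS; i++) {	/* find_first_zero_bit() */
		if (!(id_map & (1ULL << i))) {
			id_map |= 1ULL << i;	/* set_bit() */
			return i;
		}
	}
	return MAX_IDS;		/* caller treats this as -ENOSPC */
}

static void release_id(int id)
{
	id_map &= ~(1ULL << id);		/* clear_bit() */
}

int main(void)
{
	int a = acquire_id(), b = acquire_id();

	release_id(a);
	printf("a=%d b=%d next=%d\n", a, b, acquire_id());	/* 0 1 0 */
	return 0;
}
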
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index c02ac673be32..dae7ca148865 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -46,7 +46,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
le32_encode_bits(pkt_info->ls, RTW_TX_DESC_W0_LS) |
le32_encode_bits(pkt_info->dis_qselseq, RTW_TX_DESC_W0_DISQSELSEQ);
- tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
+ tx_desc->w1 = le32_encode_bits(pkt_info->mac_id, RTW_TX_DESC_W1_MACID) |
+ le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
le32_encode_bits(pkt_info->rate_id, RTW_TX_DESC_W1_RATE_ID) |
le32_encode_bits(pkt_info->sec_type, RTW_TX_DESC_W1_SEC_TYPE) |
le32_encode_bits(pkt_info->pkt_offset, RTW_TX_DESC_W1_PKT_OFFSET) |
@@ -401,14 +402,18 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
struct rtw_sta_info *si;
- struct ieee80211_vif *vif = NULL;
+ struct rtw_vif *rtwvif;
__le16 fc = hdr->frame_control;
bool bmc;
if (sta) {
si = (struct rtw_sta_info *)sta->drv_priv;
- vif = si->vif;
+ pkt_info->mac_id = si->mac_id;
+ } else if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ pkt_info->mac_id = rtwvif->mac_id;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 324189606257..3d544fd7f60f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -27,6 +27,7 @@ struct rtw_tx_desc {
#define RTW_TX_DESC_W0_BMC BIT(24)
#define RTW_TX_DESC_W0_LS BIT(26)
#define RTW_TX_DESC_W0_DISQSELSEQ BIT(31)
+#define RTW_TX_DESC_W1_MACID GENMASK(7, 0)
#define RTW_TX_DESC_W1_QSEL GENMASK(12, 8)
#define RTW_TX_DESC_W1_RATE_ID GENMASK(20, 16)
#define RTW_TX_DESC_W1_SEC_TYPE GENMASK(23, 22)
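
The new RTW_TX_DESC_W1_MACID field occupies bits 7:0 of descriptor word 1, next to QSEL in bits 12:8; le32_encode_bits(val, mask) shifts the value into the mask's bit range and stores the word little-endian. A runnable sketch of the same packing with explicit shifts (host-endian for brevity, constants mirror the defines above):

#include <stdio.h>
#include <stdint.h>

#define W1_MACID_SHIFT 0	/* GENMASK(7, 0) */
#define W1_QSEL_SHIFT  8	/* GENMASK(12, 8) */

/* Poor man's le32_encode_bits(): place each field in its bit range. */
static uint32_t encode_w1(uint8_t mac_id, uint8_t qsel)
{
	return ((uint32_t)mac_id << W1_MACID_SHIFT) |
	       ((uint32_t)(qsel & 0x1f) << W1_QSEL_SHIFT);
}

int main(void)
{
	/* mac_id 5, queue-select 3 -> 0x00000305 */
	printf("w1 = 0x%08x\n", encode_w1(5, 3));
	return 0;
}
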
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index a67e16ded91d..7070c85e2c28 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -191,7 +191,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx cur;
if (chandef) {
- cur = atomic_cmpxchg(&hal->roc_entity_idx,
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx,
RTW89_CHANCTX_IDLE, idx);
if (cur != RTW89_CHANCTX_IDLE) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
@@ -201,7 +201,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
hal->roc_chandef = *chandef;
} else {
- cur = atomic_cmpxchg(&hal->roc_entity_idx, idx,
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
RTW89_CHANCTX_IDLE);
if (cur == idx)
return;
@@ -230,7 +230,7 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
hal->entity_pause = false;
bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- atomic_set(&hal->roc_entity_idx, RTW89_CHANCTX_IDLE);
+ atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE);
rtw89_config_default_chandef(rtwdev);
}
@@ -2395,11 +2395,11 @@ static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
rtwvif->chanctx_idx = idx1;
}
- cur = atomic_read(&hal->roc_entity_idx);
+ cur = atomic_read(&hal->roc_chanctx_idx);
if (cur == idx1)
- atomic_set(&hal->roc_entity_idx, idx2);
+ atomic_set(&hal->roc_chanctx_idx, idx2);
else if (cur == idx2)
- atomic_set(&hal->roc_entity_idx, idx1);
+ atomic_set(&hal->roc_chanctx_idx, idx1);
}
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
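
The renamed roc_chanctx_idx acts as a single-owner slot: atomic_cmpxchg() installs idx only if the slot still reads RTW89_CHANCTX_IDLE, and releases it only if it still holds idx, so a stale release cannot clobber a newer owner. A runnable C11 model of the claim/release pair (values are placeholders):

#include <stdatomic.h>
#include <stdio.h>

#define IDLE -1		/* stands in for RTW89_CHANCTX_IDLE */

static atomic_int roc_idx = IDLE;

static int claim(int idx)
{
	int expected = IDLE;

	/* Succeeds only if the slot is currently idle. */
	return atomic_compare_exchange_strong(&roc_idx, &expected, idx);
}

static void release_slot(int idx)
{
	int expected = idx;

	/* Only the current owner may put the slot back to idle. */
	atomic_compare_exchange_strong(&roc_idx, &expected, IDLE);
}

int main(void)
{
	printf("claim 2: %d\n", claim(2));	/* 1 */
	printf("claim 3: %d\n", claim(3));	/* 0, already owned */
	release_slot(2);
	printf("claim 3: %d\n", claim(3));	/* 1 */
	return 0;
}
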
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index c1f6fcef904b..df51b29142aa 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,6 +129,13 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
+ .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1351,6 +1358,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
break;
+ } else if (ver->fcxbtcrpt == 7) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
+ break;
} else {
goto err;
}
@@ -1655,6 +1666,38 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 7) {
+ prpt->v7 = pfwinfo->rpt_ctrl.finfo.v7;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v7.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else if (ver->fcxbtcrpt == 8) {
prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
@@ -2397,7 +2440,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- if (btc->ver->fcxbtcrpt == 8) {
+ if (btc->ver->fcxbtcrpt == 7 || btc->ver->fcxbtcrpt == 8) {
r.v8.type = SET_REPORT_EN;
r.v8.fver = btc->ver->fcxbtcrpt;
r.v8.len = sizeof(r.v8.map);
@@ -2567,6 +2610,10 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
+ else if (ver->fwlrole == 7)
+ rtw89_fw_h2c_cxdrv_role_v7(rtwdev, type);
+ else if (ver->fwlrole == 8)
+ rtw89_fw_h2c_cxdrv_role_v8(rtwdev, type);
break;
case CXDRVINFO_CTRL:
if (ver->drvinfo_type == 1)
@@ -2748,7 +2795,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
}
-#define BTC_TDMA_WLROLE_MAX 2
+#define BTC_TDMA_WLROLE_MAX 3
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
{
@@ -2998,10 +3045,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_active_role_v7 *r7;
struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -3018,6 +3067,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 7) {
+ mode = wl_rinfo_v7->link_mode;
+ connect_cnt = wl_rinfo_v7->connect_cnt;
} else if (ver->fwlrole == 8) {
mode = wl_rinfo_v8->link_mode;
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -3036,6 +3088,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3056,6 +3109,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ (r7->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r7->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
(rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
@@ -3071,6 +3130,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3088,6 +3148,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ r7->connected && r7->band == RTW89_BAND_2G) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
ch = rlink->ch;
@@ -3146,6 +3211,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -3164,6 +3230,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
connect_cnt = wl_rinfo_v1->connect_cnt;
else if (ver->fwlrole == 2)
connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 7)
+ connect_cnt = wl_rinfo_v7->connect_cnt;
else if (ver->fwlrole == 8)
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -4082,6 +4150,8 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
dbcc_chg = wl->role_info_v1.dbcc_chg;
else if (btc->ver->fwlrole == 2)
dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 7)
+ dbcc_chg = wl->role_info_v7.dbcc_chg;
else if (btc->ver->fwlrole == 8)
dbcc_chg = wl->role_info_v8.dbcc_chg;
@@ -4754,6 +4824,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
@@ -4775,6 +4846,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ wl_rinfo.link_mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
@@ -4790,6 +4863,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 7) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy;
} else if (ver->fwlrole == 8) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
@@ -4835,37 +4910,56 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- u8 is_preagc, val;
+ u8 is_preagc, val, link_mode, dbcc_2g_phy;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
+ bool dbcc_en;
if (btc->manual_ctrl)
return;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (role_ver == 2) {
+ dbcc_en = rinfo_v2->dbcc_en;
+ link_mode = rinfo_v2->link_mode;
+ dbcc_2g_phy = rinfo_v2->dbcc_2g_phy;
+ } else if (role_ver == 7) {
+ dbcc_en = rinfo_v7->dbcc_en;
+ link_mode = rinfo_v7->link_mode;
+ dbcc_2g_phy = rinfo_v7->dbcc_2g_phy;
+ } else if (role_ver == 8) {
+ dbcc_en = rinfo_v8->dbcc_en;
+ link_mode = rinfo_v8->link_mode;
+ dbcc_2g_phy = rinfo_v8->dbcc_2g_phy;
+ } else {
+ return;
+ }
+
+ if (link_mode == BTC_WLINK_25G_MCC) {
is_preagc = BTC_PREAGC_BB_FWCTRL;
- else if (!(bt->run_patch_code && bt->enable.now))
+ } else if (!(bt->run_patch_code && bt->enable.now)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_5G)
+ } else if (link_mode == BTC_WLINK_5G) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- btc->cx.bt.link_info.profile_cnt.now == 0)
+ } else if (link_mode == BTC_WLINK_NOLINK ||
+ btc->cx.bt.link_info.profile_cnt.now == 0) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (dm->tdma_now.type != CXTDMA_OFF &&
+ } else if (dm->tdma_now.type != CXTDMA_OFF &&
!bt_linfo->hfp_desc.exist &&
!bt_linfo->hid_desc.exist &&
- dm->fddt_train == BTC_FDDT_DISABLE)
+ dm->fddt_train == BTC_FDDT_DISABLE) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
- wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
+ } else if (dbcc_en && (dbcc_2g_phy != RTW89_PHY_1)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->ant_type == BTC_ANT_SHARED)
+ } else if (btc->ant_type == BTC_ANT_SHARED) {
is_preagc = BTC_PREAGC_DISABLE;
- else
+ } else {
is_preagc = BTC_PREAGC_ENABLE;
+ }
if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
@@ -4968,6 +5062,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
@@ -4984,6 +5079,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5043,6 +5140,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
@@ -5054,6 +5152,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5359,15 +5459,26 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ u32 dur, mrole_type, mrole_noa_duration;
u16 policy_type = BTC_CXP_OFF_BT;
- u32 dur;
+
+ if (btc->ver->fwlrole == 2) {
+ mrole_type = rinfo_v2->mrole_type;
+ mrole_noa_duration = rinfo_v2->mrole_noa_duration;
+ } else if (btc->ver->fwlrole == 7) {
+ mrole_type = rinfo_v7->mrole_type;
+ mrole_noa_duration = rinfo_v7->mrole_noa_duration;
+ } else {
+ return;
+ }
if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
- switch (wl_rinfo->mrole_type) {
+ switch (mrole_type) {
case BTC_WLMROLE_STA_GC:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -5385,7 +5496,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
case BTC_WLMROLE_STA_GO_NOA:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
- dur = wl_rinfo->mrole_noa_duration;
+ dur = mrole_noa_duration;
if (wl->status.map._4way) {
dm->wl_scc.ebt_null = 0;
@@ -5567,6 +5678,14 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
return next_state;
}
+static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
+{
+ if (mac == RTW89_MAC_0)
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+ else
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+}
+
static
void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -6065,12 +6184,20 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
u8 *phy, u8 *role, u8 *dbcc_2g_phy)
{
struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
- u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt;
+
+ if (rtwdev->btc.ver->fwlrole == 7)
+ connect_cnt = rinfo_v7->connect_cnt;
+ else if (rtwdev->btc.ver->fwlrole == 8)
+ connect_cnt = rinfo_v8->connect_cnt;
+ else
+ return BTC_WLINK_NOLINK;
/* find out the 2G-PHY by connect-id ->ch */
- for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ for (j = 0; j < connect_cnt; j++) {
if (ch[j].center_ch <= 14) {
is_2g_ch_exist = true;
break;
@@ -6085,11 +6212,11 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
*dbcc_2g_phy = phy[dbcc_2g_cid];
/* connect_cnt <= 2 */
- if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ if (connect_cnt < BTC_TDMA_WLROLE_MAX)
return (_get_role_link_mode((role[dbcc_2g_cid])));
/* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */
- for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ for (k = 0; k < connect_cnt; k++) {
if (k == dbcc_2g_cid)
continue;
@@ -6116,29 +6243,54 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
static void _update_role_link_mode(struct rtw89_dev *rtwdev,
bool client_joined, u32 noa)
{
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &rtwdev->btc.cx.wl.role_info_v7;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
u32 type = BTC_WLMROLE_NONE, dur = 0;
- u32 wl_role = wl_rinfo->role_map;
+ u8 link_mode, connect_cnt;
+ u32 wl_role;
+
+ if (role_ver == 7) {
+ wl_role = rinfo_v7->role_map;
+ link_mode = rinfo_v7->link_mode;
+ connect_cnt = rinfo_v7->connect_cnt;
+ } else if (role_ver == 8) {
+ wl_role = rinfo_v8->role_map;
+ link_mode = rinfo_v8->link_mode;
+ connect_cnt = rinfo_v8->connect_cnt;
+ } else {
+ return;
+ }
/* if no client_joined, don't care P2P-GO/AP role */
if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
- if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
- wl_rinfo->link_mode = BTC_WLINK_2G_STA;
- wl_rinfo->connect_cnt--;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
- wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
- wl_rinfo->link_mode = BTC_WLINK_NOLINK;
- wl_rinfo->connect_cnt--;
+ if (link_mode == BTC_WLINK_2G_SCC) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v8->connect_cnt--;
+ }
+ } else if (link_mode == BTC_WLINK_2G_GO ||
+ link_mode == BTC_WLINK_2G_AP) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v8->connect_cnt--;
+ }
}
}
/* Identify 2-Role type */
- if (wl_rinfo->connect_cnt >= 2 &&
- (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if (connect_cnt >= 2 &&
+ (link_mode == BTC_WLINK_2G_SCC ||
+ link_mode == BTC_WLINK_2G_MCC ||
+ link_mode == BTC_WLINK_25G_MCC ||
+ link_mode == BTC_WLINK_5G)) {
if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP)))
type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
@@ -6150,8 +6302,167 @@ static void _update_role_link_mode(struct rtw89_dev *rtwdev,
dur = noa;
}
- wl_rinfo->mrole_type = type;
- wl_rinfo->mrole_noa_duration = dur;
+ if (role_ver == 7) {
+ rinfo_v7->mrole_type = type;
+ rinfo_v7->mrole_noa_duration = dur;
+ } else if (role_ver == 8) {
+ rinfo_v8->mrole_type = type;
+ rinfo_v8->mrole_noa_duration = dur;
+ }
+}
+
+static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
+{
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo = &wl->role_info_v7;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
+ u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 mac = RTW89_MAC_0, dbcc_2g_phy = RTW89_PHY_0;
+ u32 noa_duration = 0;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ continue;
+
+ act_role = &wl_rinfo->active_role[i];
+ act_role->role = wl_linfo[i].role;
+
+ /* check whether this role is connected */
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ act_role->connected = 0;
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ continue;
+ }
+
+ cnt++;
+ act_role->connected = 1;
+ act_role->pid = wl_linfo[i].pid;
+ act_role->phy = wl_linfo[i].phy;
+ act_role->band = wl_linfo[i].band;
+ act_role->ch = wl_linfo[i].ch;
+ act_role->bw = wl_linfo[i].bw;
+ act_role->noa = wl_linfo[i].noa;
+ act_role->noa_dur = wl_linfo[i].noa_duration;
+ cid_ch[cnt - 1] = wl_linfo[i].chdef;
+ cid_phy[cnt - 1] = wl_linfo[i].phy;
+ cid_role[cnt - 1] = wl_linfo[i].role;
+ wl_rinfo->role_map |= BIT(wl_linfo[i].role);
+
+ if (rid == i)
+ phy_now = act_role->phy;
+
+ if (wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) {
+ if (wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ if (client_cnt_last[i] < wl_linfo[i].client_cnt &&
+ wl_linfo[i].chdef.band == RTW89_BAND_2G)
+ client_inc_2g = true;
+ act_role->client_cnt = wl_linfo[i].client_cnt;
+ } else {
+ act_role->client_cnt = 0;
+ }
+
+ if (act_role->noa && act_role->noa_dur > 0)
+ noa_duration = act_role->noa_dur;
+
+ if (rtwdev->dbcc_en) {
+ phy_dbcc = wl_linfo[i].phy;
+ wl_dinfo->role[phy_dbcc] |= BIT(wl_linfo[i].role);
+ wl_dinfo->op_band[phy_dbcc] = wl_linfo[i].chdef.band;
+ }
+
+ if (wl_linfo[i].chdef.band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ client_joined) ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ wl_rinfo->p2p_2g = 1;
+
+ if ((wl_linfo[i].mode & BIT(BTC_WL_MODE_11B)) ||
+ (wl_linfo[i].mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+ else if (wl_linfo[i].mode & BIT(BTC_WL_MODE_HE))
+ wl->he_mode = true;
+
+ cnt_2g++;
+ b2g = true;
+ }
+
+ if (act_role->band == RTW89_BAND_5G && act_role->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+ else
+ wl->is_5g_hi_channel = 0;
+ }
+
+ wl_rinfo->connect_cnt = cnt;
+ wl->client_cnt_inc_2g = client_inc_2g;
+
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (rtwdev->dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
+
+ /* correct 2G-located PHY band for gnt ctrl */
+ if (dbcc_2g_phy < RTW89_PHY_MAX)
+ wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ } else {
+ mode = BTC_WLINK_NOLINK;
+ }
+
+ wl_rinfo->link_mode = mode;
+ _update_role_link_mode(rtwdev, client_joined, noa_duration);
+
+ /* TODO: handle DBCC-related events */
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
+
+ if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_chg = 1;
+ wl_rinfo->dbcc_en = rtwdev->dbcc_en;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ }
+
+ if (rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+
+ if (dbcc_2g_phy == RTW89_PHY_1)
+ mac = RTW89_MAC_1;
+
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+ _wl_req_mac(rtwdev, mac);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
@@ -6496,6 +6807,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
@@ -6511,6 +6823,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -6657,7 +6971,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc(rtwdev);
else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ else if (ver->fwlrole == 2 || ver->fwlrole == 7)
_action_wl_2g_scc_v2(rtwdev);
else if (ver->fwlrole == 8)
_action_wl_2g_scc_v8(rtwdev);
@@ -7250,6 +7564,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
} else if (ver->fwlrole == 2) {
*wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 7) {
+ *wlinfo = r;
+ _update_wl_info_v7(rtwdev, r.pid);
} else if (ver->fwlrole == 8) {
wlinfo = &wl->rlink_info[r.pid][rlink_id];
*wlinfo = r;
@@ -7856,6 +8173,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
@@ -7870,6 +8188,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -10288,6 +10608,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
@@ -10440,6 +10862,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 7)
+ _show_summary_v7(rtwdev, m);
else if (ver->fcxbtcrpt == 8)
_show_summary_v8(rtwdev, m);
}
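
The coex additions above all follow the driver's version-dispatch scheme: rtw89_btc_ver_defs is scanned top-down and the first entry for the chip whose firmware code is not newer than the running firmware wins, which is why the table comment insists on decreasing order per chip. A runnable sketch of that lookup with a hypothetical table (fields and values are illustrative, not the driver's):

#include <stdio.h>

struct ver_def { int chip; unsigned fw; int fcxbtcrpt; };

/* Hypothetical table: per chip, newest firmware first. */
static const struct ver_def defs[] = {
	{ 1, 352, 8 },
	{ 1, 290, 7 },
	{ 2, 100, 5 },
};

static const struct ver_def *match(int chip, unsigned running_fw)
{
	for (unsigned i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
		if (defs[i].chip == chip && running_fw >= defs[i].fw)
			return &defs[i];	/* first (newest) match wins */
	return NULL;
}

int main(void)
{
	const struct ver_def *v = match(1, 300);

	printf("fcxbtcrpt=%d\n", v ? v->fcxbtcrpt : -1);	/* 7 */
	return 0;
}
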
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7b28f2c2a08e..4553810634c6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -370,7 +370,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
return;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE)
chanctx_idx = roc_idx;
@@ -409,7 +409,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
return -EINVAL;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE)
chanctx_idx = roc_idx;
@@ -429,7 +429,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
if (!entity_active || chan_rcd->band_changed) {
rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
- rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
}
rtw89_set_entity_state(rtwdev, true);
@@ -1583,11 +1583,27 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
return ie_len;
}
+static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie01_v2 *ie;
+ u8 *rpl_fd = phy_ppdu->rpl_fd;
+
+ ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr;
+ rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
+ rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B);
+ rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C);
+ rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D);
+
+ phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
+}
+
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
const struct rtw89_phy_sts_iehdr *iehdr,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr;
+ const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr;
s16 cfo;
u32 t;
@@ -1598,12 +1614,17 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
}
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN);
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
if (!phy_ppdu->to_self)
return;
+ phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD);
phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
@@ -1619,6 +1640,39 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
}
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu);
+}
+
+static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr;
+ u16 tmp_rpl;
+
+ tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
+ phy_ppdu->rpl_avg = tmp_rpl >> 1;
+}
+
+static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00_v2 *ie;
+ u8 *rpl_path = phy_ppdu->rpl_path;
+ u16 tmp_rpl[RF_PATH_MAX];
+ u8 i;
+
+ ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr;
+ tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A);
+ tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B);
+ tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C);
+ tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D);
+
+ for (i = 0; i < RF_PATH_MAX; i++)
+ rpl_path[i] = tmp_rpl[i] >> 1;
}
static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
@@ -1630,6 +1684,11 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
switch (ie) {
+ case RTW89_PHYSTS_IE00_CMN_CCK:
+ rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu);
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu);
+ break;
case RTW89_PHYSTS_IE01_CMN_OFDM:
rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
break;
@@ -1640,6 +1699,13 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN;
+
+ phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN);
+}
+
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
@@ -1651,6 +1717,10 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
+
+ phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN);
+ if (phy_ppdu->hdr_2_en)
+ rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
@@ -1703,6 +1773,7 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
}
}
+ rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu);
rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
return 0;
@@ -4263,9 +4334,14 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id_num;
u8 mac_id;
+ if (rtwdev->support_mlo)
+ mac_id_num = chip->support_macid_num / chip->support_link_num;
+ else
+ mac_id_num = chip->support_macid_num;
+
mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
if (mac_id == mac_id_num)
return RTW89_MAX_MAC_ID_NUM;
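
rtw89_acquire_mac_id() now shrinks the allocatable pool when MLO is supported, since a station may occupy one MAC ID per link. A hedged sketch of the pool math with illustrative sizes (the real numbers come from the per-chip rtw89_chip_info):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical numbers for illustration only. */
#define SUPPORT_MACID_NUM 128
#define SUPPORT_LINK_NUM  2

static uint8_t usable_macid_num(int support_mlo)
{
	/* With MLO, each connected station may consume one MAC ID per
	 * link, so the base pool is divided by the link count.
	 */
	if (support_mlo)
		return SUPPORT_MACID_NUM / SUPPORT_LINK_NUM;
	return SUPPORT_MACID_NUM;
}

int main(void)
{
	printf("non-MLO pool: %u\n", usable_macid_num(0));
	printf("MLO pool:     %u\n", usable_macid_num(1));
	return 0;
}
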
@@ -4315,6 +4391,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->mcc.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
+ rtw89_init_wait(&rtwdev->wow.wait);
+ rtw89_init_wait(&rtwdev->mac.ps_wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
@@ -4681,6 +4759,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ if (rtwdev->support_mlo)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
@@ -4767,6 +4848,8 @@ EXPORT_SYMBOL(rtw89_core_register);
void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
rtw89_core_unregister_hw(rtwdev);
+
+ rtw89_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
@@ -4781,6 +4864,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
struct ieee80211_ops *ops;
u32 driver_data_size;
int fw_format = -1;
+ bool support_mlo;
bool no_chanctx;
firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);
@@ -4809,6 +4893,14 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
+ /* TODO: Once the driver MLO architecture is done, determine whether
+ * to support MLO according to the following conditions:
+ * 1. run with chanctx_ops
+ * 2. chip->support_link_num != 0
+ * 3. FW feature supports AP_LINK_PS
+ */
+ support_mlo = false;
+
hw->wiphy->iface_combinations = rtw89_iface_combs;
if (no_chanctx || chip->support_chanctx_num == 1)
@@ -4823,9 +4915,12 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->chip = chip;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
+ rtwdev->support_mlo = support_mlo;
- rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
no_chanctx ? "without" : "with");
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
+ support_mlo ? "with" : "without");
return rtwdev;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index b42a33b9868a..4ed9034fdb46 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -21,6 +21,7 @@ struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
+struct rtw89_debugfs;
extern const struct ieee80211_ops rtw89_ops;
@@ -796,6 +797,11 @@ struct rtw89_rx_phy_ppdu {
u8 chan_idx;
u8 ie;
u16 rate;
+ u8 rpl_avg;
+ u8 rpl_path[RF_PATH_MAX];
+ u8 rpl_fd[RF_PATH_MAX];
+ u8 bw_idx;
+ u8 rx_path_en;
struct {
bool has;
u8 avg_snr;
@@ -808,6 +814,7 @@ struct rtw89_rx_phy_ppdu {
bool stbc;
bool to_self;
bool valid;
+ bool hdr_2_en;
};
enum rtw89_mac_idx {
@@ -1589,6 +1596,23 @@ struct rtw89_btc_wl_active_role_v2 {
u32 noa_duration; /* ms */
};
+struct rtw89_btc_wl_active_role_v7 {
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 band;
+ u8 client_ps;
+ u8 bw;
+ u8 role;
+
+ u8 ch;
+ u8 noa_dur;
+ u8 client_cnt;
+ u8 rsvd2;
+} __packed;
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1670,6 +1694,22 @@ struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
} __packed;
#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v7 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+ u32 dbcc_en;
+ u32 dbcc_chg;
+ u32 dbcc_2g_phy; /* which phy operates in 2G, HW_PHY_0 or HW_PHY_1 */
+} __packed;
+
struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
@@ -1833,6 +1873,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 role_info_v7;
struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
@@ -1850,8 +1891,10 @@ struct rtw89_btc_wl_info {
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
bool bg_mode;
+ bool he_mode;
bool scbd_change;
bool fw_ver_mismatch;
+ bool client_cnt_inc_2g;
u32 scbd;
};
@@ -2202,6 +2245,19 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 fver;
u8 rsvd0;
@@ -2220,6 +2276,7 @@ union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 v7;
struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
@@ -3544,7 +3601,8 @@ struct rtw89_chip_ops {
void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
@@ -3561,11 +3619,15 @@ struct rtw89_chip_ops {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ void (*digital_pwr_comp)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
void (*query_rxdesc)(struct rtw89_dev *rtwdev,
@@ -3916,16 +3978,22 @@ struct rtw89_txpwr_conf {
const void *data;
};
+static inline bool rtw89_txpwr_entcpy(void *entry, const void *cursor, u8 size,
+ const struct rtw89_txpwr_conf *conf)
+{
+ u8 valid_size = min(size, conf->ent_sz);
+
+ memcpy(entry, cursor, valid_size);
+ return true;
+}
+
#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data)
#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \
- for (typecheck(const void *, cursor), (cursor) = (conf)->data, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)); \
+ for (typecheck(const void *, cursor), (cursor) = (conf)->data; \
(cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \
- (cursor) += (conf)->ent_sz, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)))
+ (cursor) += (conf)->ent_sz) \
+ if (rtw89_txpwr_entcpy(&(entry), cursor, sizeof(entry), conf))
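
The rtw89_for_each_in_txpwr_conf() rework matters because the old macro performed the memcpy() in the for-increment expression, which executes before the bound check, so the final step copied from one entry past the end of the table. Moving the copy into an if-helper runs it only after the condition has passed. A minimal userspace re-creation of the fixed pattern:

#include <stdio.h>
#include <string.h>

struct ent { int v; };

/* The copy happens in the if-condition helper, i.e. strictly after
 * the bounds check in the for-condition, so the cursor is never
 * dereferenced past the last entry.
 */
static int entcpy(struct ent *dst, const struct ent *cur, size_t n)
{
	memcpy(dst, cur, n);
	return 1;
}

#define for_each_ent(entry, cursor, base, nents)		\
	for ((cursor) = (base);					\
	     (cursor) < (base) + (nents);			\
	     (cursor)++)					\
		if (entcpy(&(entry), (cursor), sizeof(entry)))

int main(void)
{
	static const struct ent tbl[3] = { {1}, {2}, {3} };
	const struct ent *cur;
	struct ent e;

	for_each_ent(e, cur, tbl, 3)
		printf("%d\n", e.v);
	return 0;
}
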
struct rtw89_txpwr_byrate_data {
struct rtw89_txpwr_conf conf;
@@ -4178,6 +4246,7 @@ struct rtw89_chip_info {
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
u8 support_macid_num;
+ u8 support_link_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
@@ -4342,6 +4411,8 @@ struct rtw89_mac_info {
/* see RTW89_FW_OFLD_WAIT_COND series for wait condition */
struct rtw89_wait_info fw_ofld_wait;
+ /* see RTW89_PS_WAIT_COND series for wait condition */
+ struct rtw89_wait_info ps_wait;
};
enum rtw89_fwdl_check_type {
@@ -4594,7 +4665,7 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
- atomic_t roc_entity_idx;
+ atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
@@ -5312,6 +5383,9 @@ struct rtw89_wow_param {
u8 ptk_keyidx;
u8 akm;
+ /* see RTW89_WOW_WAIT_COND series for wait condition */
+ struct rtw89_wait_info wait;
+
bool pno_inited;
struct list_head pno_pkt_list;
struct cfg80211_sched_scan_request *nd_config;
@@ -5421,6 +5495,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ bool support_mlo;
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
@@ -5527,6 +5602,8 @@ struct rtw89_dev {
struct napi_struct napi;
int napi_budget_countdown;
+ struct rtw89_debugfs *debugfs;
+
/* HCI related data, keep last */
u8 priv[] __aligned(sizeof(void *));
};
@@ -6053,7 +6130,7 @@ const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_entity_idx);
+ enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx == idx)
return &hal->roc_chandef;
@@ -6172,12 +6249,13 @@ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
}
static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev, phy_idx);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx, chan);
}
static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev,
@@ -6243,6 +6321,15 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
chip->ops->query_ppdu(rtwdev, phy_ppdu, status);
}
+static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->convert_rpl_to_rssi)
+ chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6274,6 +6361,15 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
}
+static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->digital_pwr_comp)
+ chip->ops->digital_pwr_comp(rtwdev, phy_idx);
+}
+
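convert_rpl_to_rssi and digital_pwr_comp join the other optional chip ops: the inline wrappers NULL-check the pointer, so chips without the feature (rtw8851b explicitly sets both to NULL) fall through to a no-op. A small sketch of that dispatch pattern, with hypothetical names:

#include <stdio.h>

struct dev;

struct chip_ops {
	void (*digital_pwr_comp)(struct dev *d, int phy_idx);
};

/* Same shape as the inline wrappers above: the op is optional and the
 * wrapper degrades to a no-op when the chip leaves it NULL.
 */
static void chip_digital_pwr_comp(const struct chip_ops *ops,
				  struct dev *d, int phy_idx)
{
	if (ops->digital_pwr_comp)
		ops->digital_pwr_comp(d, phy_idx);
}

static void be_pwr_comp(struct dev *d, int phy_idx)
{
	(void)d;
	printf("compensate phy %d\n", phy_idx);
}

int main(void)
{
	struct chip_ops be = { .digital_pwr_comp = be_pwr_comp };
	struct chip_ops ax = { 0 };	/* op left NULL, like 8851B */

	chip_digital_pwr_comp(&be, NULL, 0);
	chip_digital_pwr_comp(&ax, NULL, 0);	/* silently no-op */
	return 0;
}
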
static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index bb65b814585a..29f85210f919 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -52,6 +52,27 @@ struct rtw89_debugfs_priv {
};
};
+struct rtw89_debugfs {
+ struct rtw89_debugfs_priv read_reg;
+ struct rtw89_debugfs_priv write_reg;
+ struct rtw89_debugfs_priv read_rf;
+ struct rtw89_debugfs_priv write_rf;
+ struct rtw89_debugfs_priv rf_reg_dump;
+ struct rtw89_debugfs_priv txpwr_table;
+ struct rtw89_debugfs_priv mac_reg_dump;
+ struct rtw89_debugfs_priv mac_mem_dump;
+ struct rtw89_debugfs_priv mac_dbg_port_dump;
+ struct rtw89_debugfs_priv send_h2c;
+ struct rtw89_debugfs_priv early_h2c;
+ struct rtw89_debugfs_priv fw_crash;
+ struct rtw89_debugfs_priv btc_info;
+ struct rtw89_debugfs_priv btc_manual;
+ struct rtw89_debugfs_priv fw_log_manual;
+ struct rtw89_debugfs_priv phy_info;
+ struct rtw89_debugfs_priv stations;
+ struct rtw89_debugfs_priv disable_dm;
+};
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -3463,9 +3484,9 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
{
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
@@ -3854,92 +3875,55 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
- .cb_read = rtw89_debug_priv_read_reg_get,
- .cb_write = rtw89_debug_priv_read_reg_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = {
- .cb_write = rtw89_debug_priv_write_reg_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = {
- .cb_read = rtw89_debug_priv_read_rf_get,
- .cb_write = rtw89_debug_priv_read_rf_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = {
- .cb_write = rtw89_debug_priv_write_rf_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = {
- .cb_read = rtw89_debug_priv_rf_reg_dump_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = {
- .cb_read = rtw89_debug_priv_txpwr_table_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = {
- .cb_read = rtw89_debug_priv_mac_reg_dump_get,
- .cb_write = rtw89_debug_priv_mac_reg_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = {
- .cb_read = rtw89_debug_priv_mac_mem_dump_get,
- .cb_write = rtw89_debug_priv_mac_mem_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = {
- .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get,
- .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = {
- .cb_write = rtw89_debug_priv_send_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
- .cb_read = rtw89_debug_priv_early_h2c_get,
- .cb_write = rtw89_debug_priv_early_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
- .cb_read = rtw89_debug_priv_fw_crash_get,
- .cb_write = rtw89_debug_priv_fw_crash_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
- .cb_read = rtw89_debug_priv_btc_info_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = {
- .cb_write = rtw89_debug_priv_btc_manual_set,
-};
+#define rtw89_debug_priv_get(name) \
+{ \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = {
- .cb_write = rtw89_debug_fw_log_manual_set,
-};
+#define rtw89_debug_priv_set(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
- .cb_read = rtw89_debug_priv_phy_info_get,
-};
+#define rtw89_debug_priv_select_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _select, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
- .cb_read = rtw89_debug_priv_stations_get,
-};
+#define rtw89_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
- .cb_read = rtw89_debug_priv_disable_dm_get,
- .cb_write = rtw89_debug_priv_disable_dm_set,
+static const struct rtw89_debugfs rtw89_debugfs_templ = {
+ .read_reg = rtw89_debug_priv_select_and_get(read_reg),
+ .write_reg = rtw89_debug_priv_set(write_reg),
+ .read_rf = rtw89_debug_priv_select_and_get(read_rf),
+ .write_rf = rtw89_debug_priv_set(write_rf),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .send_h2c = rtw89_debug_priv_set(send_h2c),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
+ .btc_info = rtw89_debug_priv_get(btc_info),
+ .btc_manual = rtw89_debug_priv_set(btc_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .phy_info = rtw89_debug_priv_get(phy_info),
+ .stations = rtw89_debug_priv_get(stations),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
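
The template table above is built with token-pasting initializer macros: the node name is concatenated with _get/_set/_select to pick the callback symbols, so each debugfs file is declared in a single table line. A self-contained illustration (demo_foo_* are hypothetical stand-ins, not driver symbols):

#include <stdio.h>

struct dbg_ops {
	int (*cb_read)(void);
	int (*cb_write)(int);
};

static int demo_foo_get(void) { return 42; }
static int demo_foo_set(int v) { return v; }

/* Same ## trick as rtw89_debug_priv_set_and_get(): paste the name
 * into the callback symbols inside a designated initializer.
 */
#define dbg_get_and_set(name)			\
{						\
	.cb_read  = demo_ ##name## _get,	\
	.cb_write = demo_ ##name## _set,	\
}

static const struct dbg_ops foo_ops = dbg_get_and_set(foo);

int main(void)
{
	printf("read=%d write=%d\n", foo_ops.cb_read(), foo_ops.cb_write(7));
	return 0;
}
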
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
- rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtw89_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
+ struct rtw89_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
+ if (IS_ERR(debugfs_create_file(#name, mode, parent, priv, \
+ &file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", #name); \
} while (0)
@@ -3950,13 +3934,9 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
#define rtw89_debugfs_add_r(name) \
rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+static
+void rtw89_debugfs_add_sec0(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw89",
- rtwdev->hw->wiphy->debugfsdir);
-
rtw89_debugfs_add_rw(read_reg);
rtw89_debugfs_add_w(write_reg);
rtw89_debugfs_add_rw(read_rf);
@@ -3966,6 +3946,11 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_reg_dump);
rtw89_debugfs_add_rw(mac_mem_dump);
rtw89_debugfs_add_rw(mac_dbg_port_dump);
+}
+
+static
+void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
rtw89_debugfs_add_rw(fw_crash);
@@ -3976,6 +3961,27 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
}
+
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw89_debugfs_templ,
+ sizeof(rtw89_debugfs_templ), GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw89",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw89_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw89_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
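
rtw89_debugfs_init() now kmemdup()s a const template instead of pointing every device at file-scope statics, so two adapters no longer overwrite each other's ->rtwdev back-pointer. A userspace sketch of the same idea, with malloc()+memcpy() standing in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct priv { const char *name; void *owner; };
struct templ { struct priv read_reg; struct priv write_reg; };

static const struct templ templ = {
	.read_reg  = { .name = "read_reg"  },
	.write_reg = { .name = "write_reg" },
};

/* Every device gets a private copy of the const template, so the
 * per-file back-pointer (->owner here, ->rtwdev in the driver) never
 * aliases across adapters.
 */
static struct templ *priv_dup(void *owner)
{
	struct templ *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	memcpy(t, &templ, sizeof(*t));
	t->read_reg.owner = owner;
	t->write_reg.owner = owner;
	return t;
}

int main(void)
{
	int dev_a, dev_b;
	struct templ *a = priv_dup(&dev_a);
	struct templ *b = priv_dup(&dev_b);

	printf("distinct owners: %d\n",
	       a && b && a->read_reg.owner != b->read_reg.owner);
	free(a);
	free(b);
	return 0;
}
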
#endif
#ifdef CONFIG_RTW89_DEBUGMSG
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 800ea59873a1..fc690f7c55dc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -49,8 +49,10 @@ enum rtw89_debug_mac_reg_sel {
#ifdef CONFIG_RTW89_DEBUGFS
void rtw89_debugfs_init(struct rtw89_dev *rtwdev);
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev);
#else
static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
+static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 47aa365991c1..d9b0e7ebe619 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -4325,6 +4325,52 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
+ struct rtw89_h2c_cxrole_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+ h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
+ h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
+ h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4343,6 +4389,7 @@ int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
h2c->_u32.role_map = cpu_to_le32(role->role_map);
@@ -4423,7 +4470,7 @@ int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
return -ENOMEM;
}
skb_put(skb, len);
@@ -5267,10 +5314,8 @@ fail:
}
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode)
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
u32 len = sizeof(*h2c);
@@ -5314,7 +5359,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_iqk *h2c;
u32 len = sizeof(*h2c);
@@ -5349,10 +5395,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_h2c_rf_dpk *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5392,10 +5437,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_txgapk *h2c;
u32 len = sizeof(*h2c);
@@ -5436,7 +5480,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_dack *h2c;
u32 len = sizeof(*h2c);
@@ -5472,10 +5517,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_h2c_rf_rxdck *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -7132,10 +7176,10 @@ fail:
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
struct rtw89_h2c_fwips *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -7154,25 +7198,15 @@ int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_IPS_CFG, 0, 1,
len);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
- if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
- }
- return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
}
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtwdev->wow.wait;
struct rtw89_h2c_wow_aoac *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- unsigned int cond;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -7191,8 +7225,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
len);
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}
/* Return < 0, if failures happen during waiting for the condition.
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 663eda5d0452..ad47e77d740b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -2147,6 +2147,30 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v7_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v7_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+ __le32 dbcc_en;
+ __le32 dbcc_chg;
+ __le32 dbcc_2g_phy;
+} __packed;
+
+struct rtw89_h2c_cxrole_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_wl_role_info_v7_u8 _u8;
+ struct rtw89_btc_wl_role_info_v7_u32 _u32;
+} __packed;
+
struct rtw89_btc_wl_role_info_v8_u8 {
u8 connect_cnt;
u8 link_mode;
@@ -2168,7 +2192,7 @@ struct rtw89_btc_wl_role_info_v8_u32 {
} __packed;
struct rtw89_h2c_cxrole_v8 {
- struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_h2c_cxhdr_v7 hdr;
struct rtw89_btc_wl_role_info_v8_u8 _u8;
struct rtw89_btc_wl_role_info_v8_u32 _u32;
} __packed;
@@ -3991,14 +4015,27 @@ enum rtw89_wow_h2c_func {
NUM_OF_RTW89_WOW_H2C_FUNC,
};
-#define RTW89_WOW_WAIT_COND(func) \
- (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+#define RTW89_WOW_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+
+#define RTW89_WOW_WAIT_COND_AOAC \
+ RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ)
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
-#define H2C_FUNC_MAC_LPS_PARM 0x0
-#define H2C_FUNC_P2P_ACT 0x1
-#define H2C_FUNC_IPS_CFG 0x3
+enum rtw89_ps_h2c_func {
+ H2C_FUNC_MAC_LPS_PARM = 0x0,
+ H2C_FUNC_P2P_ACT = 0x1,
+ H2C_FUNC_IPS_CFG = 0x3,
+
+ NUM_OF_RTW89_PS_H2C_FUNC,
+};
+
+#define RTW89_PS_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_PS_H2C_FUNC + (func))
+
+#define RTW89_PS_WAIT_COND_IPS_CFG \
+ RTW89_PS_WAIT_COND(0 /* don't care */, H2C_FUNC_IPS_CFG)
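
RTW89_PS_WAIT_COND() and the reworked RTW89_WOW_WAIT_COND() encode a wait-condition ID as tag * NUM_FUNC + func, so every (tag, function) pair maps to a distinct slot within its wait queue and a completion can be matched back to the exact H2C that waits on it. A compact illustration using the PS function numbers from this hunk:

#include <stdio.h>

/* Mirrors the encoding above; the function values match the hunk,
 * the tags are arbitrary examples.
 */
enum { FUNC_LPS = 0, FUNC_P2P = 1, FUNC_IPS = 3, NUM_FUNC };

#define WAIT_COND(tag, func) ((tag) * NUM_FUNC + (func))

int main(void)
{
	printf("tag0/IPS -> %d\n", WAIT_COND(0, FUNC_IPS));
	printf("tag1/IPS -> %d\n", WAIT_COND(1, FUNC_IPS));
	printf("tag1/LPS -> %d\n", WAIT_COND(1, FUNC_LPS));
	return 0;
}
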
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -4426,6 +4463,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
@@ -4453,12 +4491,17 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode);
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
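
Threading const struct rtw89_chan *chan through the RFK H2C helpers removes the hard-coded rtw89_chan_get(rtwdev, RTW89_CHANCTX_0) lookups, so calibration follows whatever channel context the caller is actually operating on. A before/after sketch of the calling convention (types and values are illustrative, not the driver's):

#include <stdio.h>

struct chan { int center_freq; };

static const struct chan ctx0 = { 2437 };

/* Before: the helper resolved a fixed context internally. */
static void rfk_old(void)
{
	const struct chan *chan = &ctx0;	/* always context 0 */

	printf("calibrate on %d MHz\n", chan->center_freq);
}

/* After: the caller supplies the context it operates on, so
 * multi-channel setups calibrate against the right channel.
 */
static void rfk_new(const struct chan *chan)
{
	printf("calibrate on %d MHz\n", chan->center_freq);
}

int main(void)
{
	struct chan roc = { 5180 };

	rfk_old();
	rfk_new(&roc);
	return 0;
}
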
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 89875f59d722..c70a23a763b0 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2742,6 +2742,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2780,6 +2781,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
}
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80);
+ }
+
return 0;
}
@@ -4880,6 +4887,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
{
/* N.B. This will run in interrupt context. */
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
const struct rtw89_c2h_done_ack *c2h =
(const struct rtw89_c2h_done_ack *)skb_c2h->data;
u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT);
@@ -4900,6 +4908,18 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
switch (h2c_class) {
default:
return;
+ case H2C_CL_MAC_PS:
+ switch (h2c_func) {
+ default:
+ return;
+ case H2C_FUNC_IPS_CFG:
+ cond = RTW89_PS_WAIT_COND_IPS_CFG;
+ break;
+ }
+
+ data.err = !!h2c_return;
+ rtw89_complete_cond(ps_wait, cond, &data);
+ return;
case H2C_CL_MAC_FW_OFLD:
switch (h2c_func) {
default:
@@ -5158,11 +5178,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtw_wow->wait;
const struct rtw89_c2h_wow_aoac_report *c2h =
(const struct rtw89_c2h_wow_aoac_report *)skb->data;
struct rtw89_completion_data data = {};
- unsigned int cond;
aoac_rpt->rpt_ver = c2h->rpt_ver;
aoac_rpt->sec_type = c2h->sec_type;
@@ -5180,8 +5199,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- rtw89_complete_cond(wait, cond, &data);
+ rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data);
}
static void
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 9d3be36ffb6e..67c2a4507124 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func {
enum rtw89_mac_c2h_wow_func {
RTW89_MAC_C2H_FUNC_AOAC_REPORT,
- RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index aa4fc9115995..c7165e757842 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -356,8 +356,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
csi_mode = RTW89_RA_RPT_MODE_HT;
ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
- (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
- (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
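
The ra_mask change fixes a classic C promotion bug: rx_mask[] is u8, so rx_mask[1] << 24 is evaluated as a signed-int shift; with 0xff that sets the sign bit (strictly undefined behavior, sign-extended in practice), and widening the result into the u64 mask smears ones across the upper 32 bits. Casting to u64 before shifting avoids it, as this standalone demo shows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mcs = 0xff;

	/* Promoted to signed int: 0xff << 24 lands in the sign bit
	 * (UB in strict C; sign-extends on common compilers).
	 */
	uint64_t buggy = (uint64_t)(mcs << 24);
	/* Widen first, then shift, as the patch does. */
	uint64_t fixed = (uint64_t)mcs << 24;

	printf("buggy: 0x%016llx\n", (unsigned long long)buggy);
	printf("fixed: 0x%016llx\n", (unsigned long long)fixed);
	return 0;
}
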
@@ -3084,6 +3084,7 @@ EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms)
{
@@ -3091,7 +3092,7 @@ int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
if (ret)
return ret;
@@ -3101,13 +3102,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3117,13 +3119,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3133,13 +3136,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3149,13 +3153,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3165,13 +3170,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -5395,7 +5401,7 @@ static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}
-static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
+static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
@@ -5693,38 +5699,47 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
}
#define IGI_RSSI_MIN 10
+#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
+ u8 igi_min;
if (unlikely(dig->bypass_dig)) {
dig->bypass_dig = false;
return;
}
+ rtw89_phy_dig_update_rssi_info(rtwdev);
+
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
rtw89_phy_dig_igi_offset_by_env(rtwdev);
- rtw89_phy_dig_update_rssi_info(rtwdev);
- dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
- dig->igi_rssi - IGI_RSSI_MIN : 0;
- dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
- dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;
+ igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
+ dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
+ dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
- dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
- dig->dyn_igi_max);
+ if (dig->dyn_igi_max >= dig->dyn_igi_min) {
+ dig->igi_fa_rssi += dig->fa_rssi_ofst;
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+ } else {
+ dig->igi_fa_rssi = dig->dyn_igi_max;
+ }
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
+ "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
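
The rtw89_phy_dig() rework computes a joint IGI window first (capped at the performance-mode maximum, floored at ABS_IGI_MIN) and clamps the FA-adjusted value into it, falling back to the max when the window inverts. A sketch of the bound math, with an illustrative IGI_OFFSET_MAX (the driver's actual value is not visible in this hunk):

#include <stdio.h>

#define IGI_RSSI_MIN	10
#define IGI_OFFSET_MAX	25	/* illustrative only */
#define IGI_MAX_PERF	0x5a
#define ABS_IGI_MIN	0xc

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static int dig_igi(int igi_rssi, int igi_fa_rssi, int fa_ofst)
{
	int igi_min = igi_rssi > IGI_RSSI_MIN ? igi_rssi - IGI_RSSI_MIN : 0;
	int dyn_max = igi_min + IGI_OFFSET_MAX;
	int dyn_min = igi_min > ABS_IGI_MIN ? igi_min : ABS_IGI_MIN;

	if (dyn_max > IGI_MAX_PERF)
		dyn_max = IGI_MAX_PERF;
	if (dyn_max < dyn_min)
		return dyn_max;	/* inverted window: use the max */
	return clampi(igi_fa_rssi + fa_ofst, dyn_min, dyn_max);
}

int main(void)
{
	printf("igi=%#x\n", dig_igi(60, 50, 4));
	return 0;
}
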
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 512f17d808fe..6dd8ec46939a 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -907,22 +907,28 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
unsigned int ms);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 92074b73ebeb..aebd6404f802 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -98,10 +98,10 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
-static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
.lastrpwm = RTW89_LAST_RPWM_ACTIVE,
};
@@ -109,6 +109,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
rtw89_fw_leave_lps_check(rtwdev, 0);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+ rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx);
}
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -137,7 +138,7 @@ static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_leave_lps(rtwdev, rtwvif);
}
void rtw89_leave_lps(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 7afec48c4056..69678eab2309 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -2506,6 +2506,10 @@
#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_AX_AGG_LEN_VHT_0 0xC618
+#define R_AX_AGG_LEN_VHT_0_C1 0xE618
+#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0)
+
#define S_AX_CTS2S_TH_SEC_256B 1
#define R_AX_SIFS_SETTING 0xC624
#define R_AX_SIFS_SETTING_C1 0xE624
@@ -6044,6 +6048,9 @@
#define R_BE_WP_PAGE_INFO1 0xB7AC
#define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16)
+#define R_BE_LTPC_T0_PATH0 0xBA28
+#define R_BE_LTPC_T0_PATH1 0xBB28
+
#define R_BE_CMAC_SHARE_FUNC_EN 0x0E000
#define B_BE_CMAC_SHARE_CRPRT BIT(31)
#define B_BE_CMAC_SHARE_EN BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index e6463035a7af..1679bd408ef3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -1587,29 +1587,31 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev)
rtw8851b_aack(rtwdev);
rtw8851b_rck(rtwdev);
rtw8851b_dack(rtwdev);
- rtw8851b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8851b_rx_dck(rtwdev, phy_idx);
- rtw8851b_iqk(rtwdev, phy_idx);
- rtw8851b_tssi(rtwdev, phy_idx, true);
- rtw8851b_dpk(rtwdev, phy_idx);
+ rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8851b_tssi_scan(rtwdev, phy_idx);
+ rtw8851b_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev)
@@ -2385,9 +2387,11 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.get_thermal = rtw8851b_get_thermal,
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8851b_pwr_on_func,
.pwr_off_func = rtw8851b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2462,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 7942f334066c..364e36354225 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -521,9 +521,10 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
}
static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path,
@@ -574,7 +575,8 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf
_rxbb_ofst_swap(rtwdev, path, rf_mode);
}
-static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 rf_reg5;
u8 path;
@@ -584,7 +586,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_af
0x2, rtwdev->hal.cv);
for (path = 0; path < RF_PATH_NUM_8851B; path++) {
- _rx_dck_info(rtwdev, phy, path, is_afe);
+ _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx);
rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
@@ -1481,9 +1483,9 @@ static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx = 0;
@@ -1586,10 +1588,11 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -1602,7 +1605,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8851B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1618,9 +1621,10 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
}
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg,
@@ -1746,9 +1750,9 @@ static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2449,7 +2453,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {};
@@ -2465,7 +2470,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
continue;
_dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path);
_dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
@@ -2505,13 +2510,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n",
DPK_VER_8851B, rtwdev->hal.cv);
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2617,9 +2623,8 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl);
@@ -2650,7 +2655,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8851B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2664,7 +2669,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2755,9 +2759,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2766,9 +2769,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2944,10 +2947,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 gidx, gidx_1st, gidx_2nd;
u8 ch = chan->channel;
s8 de_1st;
@@ -2980,10 +2982,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 tgidx, tgidx_1st, tgidx_2nd;
u8 ch = chan->channel;
s8 tde_1st;
@@ -3017,10 +3018,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3033,7 +3034,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3049,8 +3050,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3096,10 +3097,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3255,9 +3256,10 @@ void rtw8851b_dack(struct rtw89_dev *rtwdev)
_dac_cal(rtwdev, false);
}
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3265,30 +3267,32 @@ void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, false);
+ _rx_dck(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3297,7 +3301,7 @@ void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3308,9 +3312,11 @@ void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -3319,26 +3325,26 @@ void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u32 i;
@@ -3348,20 +3354,21 @@ void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3379,7 +3386,7 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n",
@@ -3391,12 +3398,13 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
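
The rtw8851b_rfk.c hunks above are one mechanical refactor applied to every calibration path: helpers that used to pin themselves to the first channel context via rtw89_chan_get(rtwdev, RTW89_CHANCTX_0) now receive either the chanctx index or an already-resolved struct rtw89_chan from the caller. A minimal before/after sketch of the shape (helper_old/helper_new are hypothetical names, not functions from this patch):

/* Before: the helper hardcoded the first channel context. */
static void helper_old(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);

	/* ... uses chan->channel, chan->band_type ... */
}

/* After: the caller resolves the context once (from rtwvif->chanctx_idx)
 * and threads it down, so calibration follows the vif's actual channel
 * instead of always context 0.
 */
static void helper_new(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		       const struct rtw89_chan *chan)
{
	/* ... same body, no hardcoded RTW89_CHANCTX_0 ... */
}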
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
index b66a23d6d367..ea7df628256b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
@@ -12,15 +12,21 @@ void rtw8851b_lck_init(struct rtw89_dev *rtwdev);
void rtw8851b_lck_track(struct rtw89_dev *rtwdev);
void rtw8851b_rck(struct rtw89_dev *rtwdev);
void rtw8851b_dack(struct rtw89_dev *rtwdev);
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 7ea388fa0657..dde96bd63021 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -337,6 +337,11 @@ static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = {
PWR_INTF_MSK_PCIE,
PWR_BASE_MAC,
PWR_CMD_WRITE, BIT(0), 0},
+ {0x0092,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0005,
PWR_CV_MSK_ALL,
PWR_INTF_MSK_PCIE,
@@ -1341,24 +1346,26 @@ static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
rtwdev->is_tssi_mode[RF_PATH_B] = false;
rtw8852a_rck(rtwdev);
- rtw8852a_dack(rtwdev);
- rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true);
+ rtw8852a_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0);
}
static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852a_rx_dck(rtwdev, phy_idx, true);
- rtw8852a_iqk(rtwdev, phy_idx);
- rtw8852a_tssi(rtwdev, phy_idx);
- rtw8852a_dpk(rtwdev, phy_idx);
+ rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852a_tssi_scan(rtwdev, phy_idx);
+ rtw8852a_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1544,10 +1551,8 @@ static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1569,7 +1574,7 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852a_bb_pmac_info tx_info = {0};
@@ -1579,7 +1584,7 @@ void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.tx_cnt = tx_cnt;
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
@@ -2108,9 +2113,11 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.get_thermal = rtw8852a_get_thermal,
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2177,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
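
The new rtw8852a_pwroff entry above adds one masked register write to the PCIe power-off sequence. Each rtw89_pwr_cfg entry encodes a single operation; reading the new entry positionally against the visible initializers (the per-field comments are our gloss of the layout, not the struct's actual member names):

static const struct rtw89_pwr_cfg example_entry = {
	0x0092,			/* register offset */
	PWR_CV_MSK_ALL,		/* applies to every chip cut version */
	PWR_INTF_MSK_PCIE,	/* PCIe interface only */
	PWR_BASE_MAC,		/* offset is within the MAC block */
	PWR_CMD_WRITE,		/* operation: masked write */
	BIT(4), BIT(4),		/* mask, value: set bit 4 of 0x0092 */
};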
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index ea82fed7b7be..d6c1acd09238 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -97,10 +97,10 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 6bae8bc07e93..9db8713ac99b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -493,11 +493,12 @@ static void _dack(struct rtw89_dev *rtwdev)
_dack_s1(rtwdev);
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -799,12 +800,13 @@ static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
}
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool fail = false;
u32 iqk_cmd = 0x0;
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx);
u32 addr_rfc_ctl = 0x0;
if (path == RF_PATH_A)
@@ -888,7 +890,8 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
}
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
@@ -927,7 +930,7 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
}
@@ -952,7 +955,8 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x0;
@@ -991,7 +995,7 @@ static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
B_CFIR_LUT_GP, group);
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx);
switch (iqk_info->iqk_band[path]) {
case RTW89_BAND_2G:
@@ -1040,7 +1044,8 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
}
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
@@ -1083,7 +1088,7 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
B_CFIR_LUT_GP, gp);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
}
@@ -1098,7 +1103,8 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x2;
@@ -1131,7 +1137,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx);
if (!fail) {
tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
iqk_info->nb_txcfir[path] = tmp | 0x2;
@@ -1179,7 +1185,8 @@ static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
}
static bool _iqk_lok(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 rf0 = 0x0;
@@ -1210,11 +1217,11 @@ static bool _iqk_lok(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx);
iqk_info->lok_cor_fail[0][path] = tmp;
fsleep(10);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx);
iqk_info->lok_fin_fail[0][path] = tmp;
fail = _lok_finetune_check(rtwdev, path);
return fail;
@@ -1321,7 +1328,8 @@ static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
}
static
-void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool lok_is_fail = false;
@@ -1333,30 +1341,35 @@ void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
for (i = 0; i < 3; i++) {
_lok_res_table(rtwdev, path, ibias++);
_iqk_txk_setting(rtwdev, path);
- lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx);
if (!lok_is_fail)
break;
}
if (iqk_info->is_nbiqk)
- iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_rxclk_setting(rtwdev, path);
_iqk_rxk_setting(rtwdev, path);
if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
- iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_info_iqk(rtwdev, phy_idx, path);
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1413,9 +1426,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
}
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- _iqk_by_path(rtwdev, phy_idx, path);
+ _iqk_by_path(rtwdev, phy_idx, path, chanctx_idx);
}
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
@@ -1513,7 +1526,8 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw89_rfk_parser(rtwdev, tbl);
}
-static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
+static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 phy_idx = 0x0;
@@ -1525,10 +1539,10 @@ static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
else
phy_idx = RTW89_PHY_1;
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
}
@@ -1607,12 +1621,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1622,12 +1637,12 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852A_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
@@ -1635,18 +1650,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1656,9 +1672,10 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
#define RXDCK_VER_8852A 0xe
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u32 ori_val;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -1704,7 +1721,7 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u8 path, kpath, dck_tune;
u32 rf_reg5;
@@ -1732,7 +1749,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
@@ -1800,9 +1817,10 @@ static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
}
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
+ enum rtw89_rf_path path, enum rtw8852a_dpk_id id,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u16 dpk_cmd = 0x0;
u32 val;
int ret;
@@ -1841,18 +1859,19 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
- _set_rx_dck(rtwdev, phy, path, false);
+ _set_rx_dck(rtwdev, phy, path, false, chanctx_idx);
}
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 kidx = dpk->cur_idx[path];
dpk->bp[path][kidx].band = chan->band_type;
@@ -1967,7 +1986,8 @@ static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 cur_rxbb;
@@ -1997,7 +2017,7 @@ static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
- _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
@@ -2186,10 +2206,11 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
}
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx)
+ enum rtw89_rf_path path, u8 kidx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_tpg_sel(rtwdev, path, kidx);
- _dpk_one_shot(rtwdev, phy, path, SYNC);
+ _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx);
return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
@@ -2242,10 +2263,10 @@ static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, enum rtw89_rf_path path,
- u8 kidx)
+ u8 kidx, enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx);
}
#define DPK_TXAGC_LOWER 0x2e
@@ -2322,7 +2343,7 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
@@ -2330,7 +2351,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2344,7 +2365,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
do {
switch (step) {
case DPK_AGC_STEP_SYNC_DGAIN:
- if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) {
tmp_txagc = DPK_TXAGC_INVAL;
goout = true;
break;
@@ -2380,7 +2401,8 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
- _dpk_lbk_rxiqk(rtwdev, phy, path);
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ chanctx_idx);
}
if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
step = DPK_AGC_STEP_SYNC_DGAIN;
@@ -2391,7 +2413,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
break;
case DPK_AGC_STEP_GAIN_LOSS_IDX:
- _dpk_gainloss(rtwdev, phy, path, kidx);
+ _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx);
tmp_gl_idx = _dpk_gainloss_read(rtwdev);
if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
@@ -2475,11 +2497,12 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
}
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx, u8 gain)
+ enum rtw89_rf_path path, u8 kidx, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_set_mdpd_para(rtwdev, 0x0);
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx);
}
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
@@ -2518,10 +2541,10 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2545,7 +2568,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0, kidx = dpk->cur_idx[path];
@@ -2558,16 +2582,16 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_rf_direct_cntrl(rtwdev, path, false);
txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
_dpk_rf_setting(rtwdev, gain, path, kidx);
- _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_rx_dck(rtwdev, phy, path, chanctx_idx);
_dpk_kip_setting(rtwdev, path, kidx);
_dpk_manual_txcfir(rtwdev, path, true);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
if (txagc == DPK_TXAGC_INVAL)
is_fail = true;
_dpk_get_thermal(rtwdev, kidx, path);
- _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
_dpk_manual_txcfir(rtwdev, path, false);
@@ -2584,7 +2608,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -2599,7 +2624,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2624,7 +2650,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_tssi_pause(rtwdev, path, true);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
}
_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
@@ -2633,7 +2659,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)) || reloaded[path])
continue;
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
@@ -2652,10 +2678,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -2682,17 +2709,19 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852A_DPK_VER, rtwdev->hal.cv,
RTW8852A_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy),
+ chanctx_idx);
}
static void _dpk_onoff(struct rtw89_dev *rtwdev,
@@ -2815,9 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2826,9 +2854,9 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}
-static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
@@ -2838,9 +2866,9 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
@@ -2869,7 +2897,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define __get_val(ptr, idx) \
({ \
@@ -2883,7 +2911,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3076,9 +3103,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
}
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 subband = chan->subband_type;
switch (subband) {
@@ -3252,10 +3278,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
@@ -3290,10 +3315,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
@@ -3328,11 +3352,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3352,7 +3375,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = 0; i < RF_PATH_NUM_8852A; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3368,8 +3391,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
__DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3458,10 +3481,10 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
}
}
-static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
@@ -3497,24 +3520,25 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path, s16 pwr_dbm, u8 enable)
+ u8 path, s16 pwr_dbm, u8 enable, const struct rtw89_chan *chan)
{
rtw8852a_bb_set_plcp_tx(rtwdev);
rtw8852a_bb_cfg_tx_path(rtwdev, path);
rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
- rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
+ rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan);
}
-static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx);
s8 power;
s16 xdbm;
u32 i, tx_counter = 0;
@@ -3546,9 +3570,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan);
mdelay(15);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan);
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
tx_counter;
@@ -3600,19 +3624,21 @@ void rtw8852a_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852a_dack(struct rtw89_dev *rtwdev)
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3620,34 +3646,35 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_iqk_init(rtwdev);
if (rtwdev->dbcc_en)
- _iqk_dbcc(rtwdev, phy_idx);
+ _iqk_dbcc(rtwdev, phy_idx, chanctx_idx);
else
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, is_afe);
+ _rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3655,7 +3682,7 @@ void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3666,8 +3693,10 @@ void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
@@ -3676,26 +3705,27 @@ void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
_tssi_slope_cal_org(rtwdev, phy, i);
_tssi_set_rf_gap_tbl(rtwdev, phy, i);
_tssi_set_slope(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
- _tssi_high_power(rtwdev, phy);
- _tssi_pre_tx(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
+ _tssi_high_power(rtwdev, phy, chan);
+ _tssi_pre_tx(rtwdev, phy, chanctx_idx);
}
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u8 i;
@@ -3710,14 +3740,14 @@ void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
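
Among the rtw8852a_rfk.c hunks above, _dpk_agc() is the busiest consumer of the new chanctx plumbing: it is a small step machine that loops until the AGC converges or bails out. A stripped-down sketch of that control shape, keeping only the two steps visible in the hunks; the remaining steps, all register work, and the exact loop condition are elided or condensed here:

	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc = init_txagc, agc_cnt = 0;
	bool goout = false;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) {
				tmp_txagc = DPK_TXAGC_INVAL;
				goout = true;	/* sync failed: abort AGC */
				break;
			}
			/* out-of-range dgain loops back to this step */
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			break;
		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx);
			/* ... evaluate gain loss, adjust txagc/rxbb ... */
			goout = true;	/* sketch only: stop after one pass */
			break;
		}
	} while (!goout && ++agc_cnt < DPK_AGC_ADJ_LMT);

	return tmp_txagc;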
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
index fa058ccc8616..8761f2cc9359 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
@@ -8,14 +8,19 @@
#include "core.h"
void rtw8852a_rck(struct rtw89_dev *rtwdev);
-void rtw8852a_dack(struct rtw89_dev *rtwdev);
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe);
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev);
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
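
One asymmetry in the header above is deliberate: rtw8852a_tssi() takes a chanctx index, while rtw8852a_tssi_scan() takes the resolved struct rtw89_chan directly. That matches the two call sites earlier in this diff, condensed below; the scan-time rationale in the comment is our reading, not stated in the patch:

	/* rtw8852a_rfk_channel(): calibrate against the vif's own context */
	rtw8852a_tssi(rtwdev, rtwvif->phy_idx, rtwvif->chanctx_idx);

	/* rtw8852a_rfk_band_changed(): the core hands over the target chan,
	 * which during a scan need not be any committed channel context.
	 */
	rtw8852a_tssi_scan(rtwdev, phy_idx, chan);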
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index f26979b89cc9..12be52f76427 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -558,30 +558,32 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
rtw8852b_dpk_init(rtwdev);
rtw8852b_rck(rtwdev);
- rtw8852b_dack(rtwdev);
- rtw8852b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852b_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852b_rx_dck(rtwdev, phy_idx);
- rtw8852b_iqk(rtwdev, phy_idx);
- rtw8852b_tssi(rtwdev, phy_idx, true);
- rtw8852b_dpk(rtwdev, phy_idx);
+ rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852b_tssi_scan(rtwdev, phy_idx);
+ rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
@@ -739,9 +741,11 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -817,6 +821,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index f364e76e2b34..ede0ca5426ae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1445,10 +1445,8 @@ static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
static
void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852bx_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1473,7 +1471,7 @@ void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
static
void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852bx_bb_pmac_info tx_info = {0};
@@ -1484,7 +1482,7 @@ void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
static
@@ -1623,9 +1621,9 @@ static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static
void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 rst_mask0;
u32 rst_mask1;
@@ -1713,9 +1711,10 @@ static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
if (rtwdev->hal.rx_nss == 1) {
@@ -1948,6 +1947,19 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void __rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u8 delta = phy_ppdu->rpl_avg - phy_ppdu->rssi_avg;
+ u8 *rssi = phy_ppdu->rssi;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rssi[i] += delta;
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -2030,6 +2042,7 @@ const struct rtw8852bx_info rtw8852bx_info = {
.ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
.ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = __rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi,
.read_efuse = __rtw8852bx_read_efuse,
.read_phycap = __rtw8852bx_read_phycap,
.power_trim = __rtw8852bx_power_trim,
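
The new __rtw8852bx_convert_rpl_to_rssi() above leans on u8 wrap-around: delta is computed modulo 256, so a negative offset still adds correctly to each per-path RSSI before rssi_avg is overwritten with rpl_avg. A worked example with made-up values:

	/* rpl_avg = 100, rssi_avg = 104:
	 *   delta   = (u8)(100 - 104) = 252
	 *   rssi[i] = (u8)(110 + 252) = 106, i.e. 110 - 4 as intended
	 */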
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
index 801e7ab9f4fa..3dce5422f41e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -121,13 +121,14 @@ struct rtw8852bx_info {
void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan);
void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
struct rtw8852bx_bb_tssi_bak *bak);
void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
@@ -145,6 +146,8 @@ struct rtw8852bx_info {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
@@ -207,9 +210,10 @@ void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
static inline
void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path, chan);
}
static inline
@@ -228,9 +232,10 @@ void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
static inline
void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx,
+ chan);
}
static inline
@@ -291,6 +296,13 @@ void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
}
static inline
+void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
+static inline
int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block)
{
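
rtw8852b_common.h follows a small dispatch pattern: the shared module exports a struct of function pointers (rtw8852bx_info) plus thin static inline wrappers, so rtw8852b and its sibling chips can share one implementation while call sites stay type-checked. Adding an op, as this diff does for convert_rpl_to_rssi, touches three places in the common module (plus the rtw89_chip_ops hookup in rtw8852b.c):

	/* 1. pointer in the ops struct (rtw8852b_common.h) */
	void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
				    struct rtw89_rx_phy_ppdu *phy_ppdu);

	/* 2. inline wrapper that callers actually use */
	static inline
	void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
					   struct rtw89_rx_phy_ppdu *phy_ppdu)
	{
		rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu);
	}

	/* 3. implementation wired into the struct (rtw8852b_common.c) */
	.convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi,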
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 776a45d1fe33..ef47a5facc83 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -1382,9 +1382,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
_iqk_info_iqk(rtwdev, phy_idx, path);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 reg_rf18;
u32 reg_35c;
@@ -1608,12 +1609,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1623,7 +1625,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1638,20 +1640,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1761,9 +1764,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -1786,9 +1789,10 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
@@ -1803,9 +1807,10 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
@@ -2217,9 +2222,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 step = DPK_AGC_STEP_SYNC_DGAIN;
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
@@ -2416,9 +2421,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2443,7 +2448,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2464,7 +2470,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
if (txagc == 0xff) {
@@ -2491,7 +2497,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
@@ -2503,7 +2510,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2519,19 +2527,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
- _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
- _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
@@ -2543,9 +2551,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2577,17 +2586,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852B_DPK_VER, rtwdev->hal.cv,
RTW8852B_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, RF_AB);
+ _dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
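
Note: the recurring substitution is rtw89_chan_get(rtwdev, RTW89_CHANCTX_0) becoming rtw89_chan_get(rtwdev, chanctx_idx), so decisions such as the DPK bypass check track the channel of the context actually being calibrated. A self-contained model of that lookup (placeholder types and a simplified bypass rule, not the driver's real API):

#include <stdio.h>

enum chanctx_idx { CHANCTX_0, CHANCTX_1, CHANCTX_NUM };
enum band { BAND_2G, BAND_5G };

struct chan { enum band band_type; unsigned int channel; };
struct dev  { struct chan chans[CHANCTX_NUM]; };

/* Model of rtw89_chan_get(): resolve the active channel descriptor
 * for a given context instead of always reading slot 0. */
static const struct chan *chan_get(const struct dev *d, enum chanctx_idx idx)
{
	return &d->chans[idx];
}

/* Model of _dpk_bypass_check(): skip DPK when an external 2 GHz PA
 * is fitted and the *calibrated context* sits on the 2 GHz band. */
static int dpk_bypass_check(const struct dev *d, enum chanctx_idx idx,
			    int epa_2g)
{
	const struct chan *chan = chan_get(d, idx);

	return epa_2g && chan->band_type == BAND_2G;
}

int main(void)
{
	struct dev d = { .chans = { { BAND_2G, 6 }, { BAND_5G, 36 } } };

	printf("%d %d\n",
	       dpk_bypass_check(&d, CHANCTX_0, 1),   /* 1: bypass */
	       dpk_bypass_check(&d, CHANCTX_1, 1));  /* 0: run DPK */
	return 0;
}
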
@@ -2722,9 +2732,8 @@ static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2734,9 +2743,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
@@ -2778,7 +2786,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2792,7 +2800,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2944,9 +2951,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -2960,9 +2966,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3231,10 +3237,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3267,10 +3272,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3304,10 +3308,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3320,7 +3324,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3336,8 +3340,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3383,10 +3387,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3420,7 +3424,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3436,11 +3440,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3494,7 +3498,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3513,9 +3517,10 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3546,14 +3551,14 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
tssi_cw_rpt[j] =
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3567,14 +3572,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, 4};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
@@ -3635,7 +3639,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3755,18 +3759,19 @@ void rtw8852b_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852b_dack(struct rtw89_dev *rtwdev)
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
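
Note: at the exported entry points the context index feeds two things — the channel lookup and the BT-coexistence phy map that brackets the calibration with start/stop notifications. A sketch of that shape (the btc_phymap encoding below is a placeholder, not the real one):

#include <stdio.h>

enum chanctx_idx { CHANCTX_0, CHANCTX_1 };
enum phy_idx { PHY_0, PHY_1 };

/* Stand-in for rtw89_btc_phymap(): the coex map now depends on which
 * context is being calibrated, not always context 0. */
static unsigned char btc_phymap(enum phy_idx phy, enum chanctx_idx idx)
{
	return (unsigned char)((phy << 4) | idx);   /* placeholder encoding */
}

static void iqk_body(enum phy_idx phy, enum chanctx_idx idx)
{
	printf("iqk phy=%d ctx=%d\n", phy, idx);
}

/* Model of the reworked rtw8852b_iqk() entry point. */
static void chip_iqk(enum phy_idx phy, enum chanctx_idx idx)
{
	unsigned char phy_map = btc_phymap(phy, idx);

	printf("notify coex start, map=0x%02x\n", phy_map);
	iqk_body(phy, idx);
	printf("notify coex stop\n");
}

int main(void) { chip_iqk(PHY_0, CHANCTX_1); return 0; }
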
@@ -3774,15 +3779,16 @@ void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3795,9 +3801,10 @@ void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3806,7 +3813,7 @@ void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3817,9 +3824,11 @@ void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 tx_en;
u8 i;
@@ -3829,34 +3838,34 @@ void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
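
Note: for the TSSI helpers the patch goes one step further — rtw8852b_tssi() resolves the rtw89_chan descriptor once and hands a const pointer down, rather than each helper repeating the context-0 lookup. A minimal model of hoisting the lookup (hypothetical names):

#include <stdio.h>

enum chanctx_idx { CHANCTX_0, CHANCTX_NUM };
struct chan { unsigned int channel; };
struct dev  { struct chan chans[CHANCTX_NUM]; };

static const struct chan *chan_get(const struct dev *d, enum chanctx_idx idx)
{
	return &d->chans[idx];
}

/* Helpers take the already-resolved descriptor... */
static void tssi_set_sys(const struct chan *chan)
{
	printf("sys setup for ch %u\n", chan->channel);
}

static void tssi_set_tmeter_tbl(const struct chan *chan)
{
	printf("tmeter table for ch %u\n", chan->channel);
}

/* ...so the entry point resolves the channel exactly once per run. */
static void tssi(const struct dev *d, enum chanctx_idx idx)
{
	const struct chan *chan = chan_get(d, idx);
	int path;

	for (path = 0; path < 2; path++) {
		tssi_set_sys(chan);
		tssi_set_tmeter_tbl(chan);
	}
}

int main(void)
{
	struct dev d = { .chans = { { 36 } } };
	tssi(&d, CHANCTX_0);
	return 0;
}
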
@@ -3879,24 +3888,25 @@ void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3904,7 +3914,7 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
if (enable) {
if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
- rtw8852b_tssi(rtwdev, phy, true);
+ rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
return;
}
@@ -3921,8 +3931,8 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -3935,12 +3945,13 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
index f52832065600..c31ba446e6e0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852b_rck(struct rtw89_dev *rtwdev);
-void rtw8852b_dack(struct rtw89_dev *rtwdev);
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
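
Note: the header updates show two signature styles side by side — functions that also need the index for side lookups (coex phymap, per-context state) take enum rtw89_chanctx_idx, while pure consumers of channel data such as rtw8852b_tssi_scan() take const struct rtw89_chan * directly. Illustrated as hypothetical declarations (compilable, names are placeholders):

enum chanctx_idx { CHANCTX_0 };
struct chan { unsigned int channel; };
struct dev;

/* Style 1: pass the context index; the callee resolves the channel
 * itself and can also derive context-dependent side data. */
void chip_iqk(struct dev *d, enum chanctx_idx chanctx_idx);

/* Style 2: pass the resolved descriptor; the callee only reads
 * channel/band fields and the caller did the lookup already. */
void chip_tssi_scan(struct dev *d, const struct chan *chan);
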
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index fb98ef9dbc54..7dfdcb5964e1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -480,12 +480,12 @@ static void rtw8852bt_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
}
static void rtw8852bt_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
- u8 phy_idx)
+ u8 phy_idx, const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
- rtw8852bt_tssi_scan(rtwdev, phy_idx);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
} else {
if (phy_idx == RTW89_PHY_0)
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
@@ -511,14 +511,14 @@ static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
if (enter) {
rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
- rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0, chan);
rtw8852bt_adc_en(rtwdev, false);
fsleep(40);
rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
} else {
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
rtw8852bt_adc_en(rtwdev, true);
- rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan);
rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
}
@@ -531,30 +531,32 @@ static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
rtw8852bt_dpk_init(rtwdev);
rtw8852bt_rck(rtwdev);
- rtw8852bt_dack(rtwdev);
- rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852bt_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852bt_rx_dck(rtwdev, phy_idx);
- rtw8852bt_iqk(rtwdev, phy_idx);
- rtw8852bt_tssi(rtwdev, phy_idx, true);
- rtw8852bt_dpk(rtwdev, phy_idx);
+ rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852bt_tssi_scan(rtwdev, phy_idx);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev)
@@ -673,9 +675,11 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852bt_pwr_on_func,
.pwr_off_func = rtw8852bt_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -750,6 +754,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.dig_regs = &rtw8852bt_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
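
Note: on the caller side, rtw8852bt_rfk_channel() derives the index from rtwvif->chanctx_idx and uses it for the whole calibration sequence, so every step agrees on which context it serves. A runnable model of that flow (stand-in vif struct, not the driver's real type):

#include <stdio.h>

enum chanctx_idx { CHANCTX_0, CHANCTX_1 };
enum phy_idx { PHY_0, PHY_1 };

struct vif {                       /* stand-in for rtw89_vif */
	enum phy_idx phy_idx;
	enum chanctx_idx chanctx_idx;
};

static void rx_dck(enum phy_idx p, enum chanctx_idx c) { printf("rx_dck %d %d\n", p, c); }
static void iqk(enum phy_idx p, enum chanctx_idx c)    { printf("iqk %d %d\n", p, c); }
static void dpk(enum phy_idx p, enum chanctx_idx c)    { printf("dpk %d %d\n", p, c); }

/* Model of rtw8852bt_rfk_channel(): the per-vif context index is the
 * single source of truth for the whole calibration sequence. */
static void rfk_channel(const struct vif *vif)
{
	enum chanctx_idx idx = vif->chanctx_idx;
	enum phy_idx phy = vif->phy_idx;

	rx_dck(phy, idx);
	iqk(phy, idx);
	dpk(phy, idx);
}

int main(void)
{
	struct vif v = { PHY_0, CHANCTX_1 };
	rfk_channel(&v);
	return 0;
}
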
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index 278f907fd895..336a83e1d46b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1525,9 +1525,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
lok_result, txk_result, rxk_result);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 get_empty_table = false;
u32 reg_rf18;
@@ -1755,12 +1756,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1770,7 +1772,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852BT_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
@@ -1785,20 +1787,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1879,9 +1882,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2277,9 +2280,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
@@ -2504,9 +2507,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 idx, cur_band, cur_ch;
bool is_reload = false;
@@ -2549,7 +2552,8 @@ void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool i
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2569,7 +2573,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
_rfk_get_thermal(rtwdev, kidx, path);
@@ -2601,7 +2605,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_kip_val[BACKUP_KIP_REGS_NR];
@@ -2611,7 +2616,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
u8 path;
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2623,7 +2628,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
@@ -2631,7 +2636,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
_rfk_bb_afe_setting(rtwdev, phy, path, kpath);
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
- _dpk_main(rtwdev, phy, path, 1);
+ _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_rfk_bb_afe_restore(rtwdev, phy, path, kpath);
@@ -2646,9 +2651,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2817,9 +2823,8 @@ static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2829,9 +2834,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
@@ -2878,7 +2882,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2893,7 +2897,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
})
struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3047,9 +3050,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -3063,9 +3065,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3310,10 +3312,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3346,10 +3347,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3383,10 +3383,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3399,7 +3399,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3415,8 +3415,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3463,10 +3463,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3500,7 +3500,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3516,11 +3516,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3574,7 +3574,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3593,9 +3593,11 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
+ chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3626,7 +3628,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
@@ -3634,7 +3636,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3648,14 +3650,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, -8};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
u8 channel = chan->channel;
@@ -3701,7 +3702,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3833,18 +3834,19 @@ void rtw8852bt_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3852,15 +3854,16 @@ void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3873,15 +3876,16 @@ void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
- if (_dpk_bypass_check(rtwdev, phy_idx))
+ if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
_dpk_force_bypass(rtwdev, phy_idx);
else
- _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
}
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
@@ -3889,10 +3893,12 @@ void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 reg_backup[2] = {};
u32 tx_en;
u8 i;
@@ -3905,36 +3911,36 @@ void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3957,24 +3963,25 @@ void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3996,8 +4003,8 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -4010,12 +4017,13 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
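
Note: the shared rtw8852bx_* baseband helpers (rx-path control, PMAC packet TX) now also take the resolved channel, so the same chan pointer threads from each variant's TSSI code into the common layer. A toy version of that call chain (hypothetical names):

#include <stdio.h>

struct chan { unsigned int channel; };

/* Stand-ins for the shared rtw8852bx_* BB helpers that now take the
 * resolved channel instead of looking it up internally. */
static void bb_ctrl_rx_path(int rx_path, const struct chan *chan)
{
	printf("rx path %d on ch %u\n", rx_path, chan->channel);
}

static void bb_set_pmac_pkt_tx(int enable, const struct chan *chan)
{
	printf("pmac tx %d on ch %u\n", enable, chan->channel);
}

/* Model of _tssi_hw_tx(): one chan pointer threads through every
 * common-layer call, keeping both B and BT variants context-correct. */
static void tssi_hw_tx(int enable, const struct chan *chan)
{
	if (enable)
		bb_ctrl_rx_path(0, chan);
	bb_set_pmac_pkt_tx(enable, chan);
}

int main(void)
{
	struct chan c = { 149 };
	tssi_hw_tx(1, &c);
	tssi_hw_tx(0, &c);
	return 0;
}
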
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
index ef3d98f80400..e34560b4905f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852bt_rck(struct rtw89_dev *rtwdev);
-void rtw8852bt_dack(struct rtw89_dev *rtwdev);
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index dc1da9ff055c..1c6e89ab0f4b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1817,7 +1817,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx, chan);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
@@ -1825,7 +1825,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx, chan);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
@@ -1842,26 +1842,28 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
rtw8852c_dpk_init(rtwdev);
rtw8852c_rck(rtwdev);
- rtw8852c_dack(rtwdev);
+ rtw8852c_dack(rtwdev, RTW89_CHANCTX_0);
rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
}
static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
rtw8852c_rx_dck(rtwdev, phy_idx, false);
- rtw8852c_iqk(rtwdev, phy_idx);
- rtw8852c_tssi(rtwdev, phy_idx);
- rtw8852c_dpk(rtwdev, phy_idx);
+ rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852c_tssi_scan(rtwdev, phy_idx);
+ rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -2888,9 +2890,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.get_thermal = rtw8852c_get_thermal,
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2958,6 +2962,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
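
Note: rtw8852c opts out of the new hooks by setting .convert_rpl_to_rssi and .digital_pwr_comp to NULL, which implies the core treats them as optional and must guard the indirect call. A hypothetical guarded dispatch illustrating that convention (not the core's literal code):

#include <stdio.h>

struct dev;
struct ppdu { int rpl, rssi; };

struct chip_ops {
	/* optional: variants lacking the feature leave these NULL */
	void (*convert_rpl_to_rssi)(struct dev *d, struct ppdu *p);
	void (*digital_pwr_comp)(struct dev *d);
};

/* 8852C-style table: both new hooks left NULL. */
static const struct chip_ops c_ops = {
	.convert_rpl_to_rssi = NULL,
	.digital_pwr_comp = NULL,
};

/* Core-side call site guards the optional hook before dispatch. */
static void core_query_ppdu(struct dev *d, const struct chip_ops *ops,
			    struct ppdu *p)
{
	if (ops->convert_rpl_to_rssi)
		ops->convert_rpl_to_rssi(d, p);
	else
		printf("no RPL->RSSI conversion on this chip\n");
}

int main(void)
{
	struct ppdu p = { 80, 0 };
	core_query_ppdu(NULL, &c_ops, &p);
	return 0;
}
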
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 6e199e82690b..211c051c2967 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -5,6 +5,7 @@
#include "chan.h"
#include "coex.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
@@ -584,11 +585,12 @@ static void _drck(struct rtw89_dev *rtwdev)
rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -1321,9 +1323,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
@@ -1516,12 +1519,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1531,7 +1535,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852C_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
@@ -1544,18 +1548,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
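/* A hedged equivalent of the switch above (assuming, as in rtw89, that
 * RF_A == BIT(RF_PATH_A) and RF_B == BIT(RF_PATH_B)): kpath is a path
 * bitmask, so the dispatch can also be read as a bit walk: */
u8 kpath = _kpath(rtwdev, phy_idx);
u8 path;

for (path = RF_PATH_A; path <= RF_PATH_B; path++)
	if (kpath & BIT(path))
		_doiqk(rtwdev, force, phy_idx, path, chanctx_idx);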
@@ -1901,9 +1906,9 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2495,9 +2500,9 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2689,7 +2694,8 @@ static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_byb
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
@@ -2705,7 +2711,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2722,7 +2729,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
path, dpk->cur_idx[path]);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
@@ -2755,10 +2762,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 band = chan->band_type;
if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
@@ -2790,17 +2798,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852C_DPK_VER, rtwdev->hal.cv,
RTW8852C_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
rtw8852c_rx_dck(rtwdev, phy, false);
@@ -2891,9 +2900,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_bandwidth bw = chan->band_width;
enum rtw89_band band = chan->band_type;
u32 clk = 0x0;
@@ -2945,9 +2953,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
}
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -2972,7 +2979,7 @@ static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2985,8 +2992,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
} \
__val; \
})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3001,56 +3008,88 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
switch (subband) {
default:
case RTW89_CH_2G:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
break;
case RTW89_CH_5G_BAND_1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
break;
case RTW89_CH_5G_BAND_3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
break;
case RTW89_CH_5G_BAND_4:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX0:
case RTW89_CH_6G_BAND_IDX1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
break;
case RTW89_CH_6G_BAND_IDX2:
case RTW89_CH_6G_BAND_IDX3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
break;
case RTW89_CH_6G_BAND_IDX4:
case RTW89_CH_6G_BAND_IDX5:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX6:
case RTW89_CH_6G_BAND_IDX7:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
break;
}
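/* Every arm above repeats one pattern: prefer the firmware-loaded
 * txpwr-tracking delta row when the elm_info.txpwr_trk element exists,
 * else fall back to the static rtw89_8852c_trk_cfg table. A hedged
 * helper capturing it (assuming trk->delta[type][idx] yields a
 * const s8 row pointer, as the ternaries imply): */
static const s8 *trk_delta_or(const struct rtw89_fw_txpwr_track_cfg *trk,
			      u8 type, u8 idx, const s8 *fallback)
{
	return trk ? trk->delta[type][idx] : fallback;
}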
@@ -3158,9 +3197,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -3175,9 +3213,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
@@ -3586,10 +3624,9 @@ static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
@@ -3650,10 +3687,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
@@ -3715,10 +3751,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3741,7 +3776,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = path; i < path_max; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3757,8 +3792,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3781,7 +3816,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
}
static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 tssi_trk[2] = {0x5818, 0x7818};
static const u32 tssi_en[2] = {0x5820, 0x7820};
@@ -3790,25 +3825,26 @@ static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
if (rtwdev->dbcc_en && path == RF_PATH_B)
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan);
else
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan);
} else {
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
}
}
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
} else {
if (phy_idx == RTW89_PHY_0)
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
else
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
}
}
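/* Hedged usage sketch: under DBCC each PHY drives one RF path
 * (PHY0 -> A, PHY1 -> B); otherwise both paths follow the single PHY.
 * A caller resolves the chan and picks the PHY: */
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);

rtw8852c_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan);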
@@ -4112,26 +4148,27 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852c_dack(struct rtw89_dev *rtwdev)
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
@@ -4202,10 +4239,11 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u8 dck_channel;
u8 cur_thermal;
u32 tx_en;
@@ -4259,16 +4297,17 @@ void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
dpk->is_dpk_reload_en = false;
}
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -4279,8 +4318,10 @@ void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -4298,23 +4339,24 @@ void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i, chan);
_tssi_set_bbgain_split(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
_tssi_set_slope(rtwdev, phy, i);
_tssi_run_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
@@ -4339,15 +4381,15 @@ void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_dck(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
@@ -4422,7 +4464,7 @@ void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
dpk->is_dpk_enable = true;
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
_dpk_onoff(rtwdev, path, false);
- rtw8852c_dpk(rtwdev, RTW89_PHY_0);
+ rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 6605137e61aa..306dd0a0be73 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -9,16 +9,21 @@
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_rck(struct rtw89_dev *rtwdev);
-void rtw8852c_dack(struct rtw89_dev *rtwdev);
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
void rtw8852c_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan);
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 6004a622d244..63b1ff2f98ed 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -1689,6 +1689,60 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
return 0;
}
+#define DIGITAL_PWR_COMP_REG_NUM 22
+static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303,
+ 0x05030302, 0x06060605, 0x06050300, 0x0A090807, 0x02000B0B,
+ 0x09080604, 0x0D0D0C0B, 0x08060400, 0x110F0C0B, 0x05001111,
+ 0x0D0C0907, 0x12121210},
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x04030201, 0x05050505, 0x01000505,
+ 0x07060504, 0x09090908, 0x09070400, 0x0E0D0C0B, 0x03000E0E,
+ 0x0D0B0907, 0x1010100F, 0x0B080500, 0x1512100D, 0x05001515,
+ 0x100D0B08, 0x15151512},
+};
+
+static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ bool enable, u8 nss,
+ enum rtw89_rf_path path)
+{
+ static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1};
+ const u32 *digital_pwr_comp;
+ u32 addr, val;
+ u32 i;
+
+ if (nss == 1)
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+
+ addr = ltpc_t0[path];
+ for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) {
+ val = enable ? digital_pwr_comp[i] : 0;
+ rtw89_phy_write32(rtwdev, addr, val);
+ }
+}
+
+static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ bool enable = chan->band_type != RTW89_BAND_2G;
+ u8 path;
+
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ path = RF_PATH_A;
+ else
+ path = RF_PATH_B;
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path);
+ } else {
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A);
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B);
+ }
+}
+
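/* Hedged arithmetic check for the write loop above: with a 4-byte
 * stride and DIGITAL_PWR_COMP_REG_NUM == 22, the per-path window spans
 * base..base + 0x54, i.e. the last write lands at base + 4 * 21: */
u32 base = R_BE_LTPC_T0_PATH0;	/* path A base, per the table above */
u32 last = base + 4 * (DIGITAL_PWR_COMP_REG_NUM - 1);	/* base + 0x54 */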
static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
@@ -1810,11 +1864,13 @@ static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
}
static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
- enum rtw89_mlo_dbcc_mode mode)
+ enum rtw89_mlo_dbcc_mode mode,
+ enum rtw89_phy_idx phy_idx)
{
if (!rtwdev->dbcc_en)
return;
+ rtw8922a_digital_pwr_comp(rtwdev, phy_idx);
rtw8922a_ctrl_mlo(rtwdev, mode);
}
@@ -1921,7 +1977,7 @@ static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
if (!enter) {
- rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode, phy_idx);
rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
}
}
@@ -1937,10 +1993,12 @@ static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
- rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, chan, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
}
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
@@ -1964,8 +2022,10 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
@@ -1973,20 +2033,21 @@ static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtw
_wait_rx_mode(rtwdev, RF_AB);
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
- rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
- rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
- rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, chan, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
}
static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6);
}
static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -2436,6 +2497,38 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ /* Mapping to BW: 5, 10, 20, 40, 80, 160, 80_80 */
+ static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};
+ u8 *rssi = phy_ppdu->rssi;
+ u8 compensate = 0;
+ u16 rpl_tmp;
+ u8 i;
+
+ if (phy_ppdu->bw_idx < ARRAY_SIZE(bw_compensate))
+ compensate = bw_compensate[phy_ppdu->bw_idx];
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ if (!(phy_ppdu->rx_path_en & BIT(i))) {
+ rssi[i] = 0;
+ phy_ppdu->rpl_path[i] = 0;
+ phy_ppdu->rpl_fd[i] = 0;
+ }
+ if (phy_ppdu->rate >= RTW89_HW_RATE_OFDM6) {
+ rpl_tmp = phy_ppdu->rpl_fd[i];
+ if (rpl_tmp)
+ rpl_tmp += compensate;
+
+ phy_ppdu->rpl_path[i] = rpl_tmp;
+ }
+ rssi[i] = phy_ppdu->rpl_path[i];
+ }
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
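/* A hedged worked example of the compensation above: bw_idx indexes
 * {5, 10, 20, 40, 80, 160, 80+80} MHz and adds 6 dB per doubling past
 * 20 MHz, so an OFDM PPDU at 80 MHz (bw_idx 4) with rpl_fd == 40 ends
 * up with rpl_path == 52; CCK rates keep the time-domain rpl_path: */
static u8 rpl_bw_compensate(u8 bw_idx)
{
	static const u8 comp[] = {0, 0, 0, 6, 12, 18, 0};

	return bw_idx < ARRAY_SIZE(comp) ? comp[bw_idx] : 0;
}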
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2455,10 +2548,12 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2491,9 +2586,11 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.get_thermal = rtw8922a_get_thermal,
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
+ .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
+ .digital_pwr_comp = rtw8922a_digital_pwr_comp,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc_v2,
@@ -2559,6 +2656,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = 32,
+ .support_link_num = 2,
.support_chanctx_num = 2,
.support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index a33280ec2a25..b2e47829983f 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -441,6 +441,7 @@ struct rtw89_phy_sts_hdr {
} __packed;
#define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0)
+#define RTW89_PHY_STS_HDR_W0_HDR_2_EN BIT(5)
#define RTW89_PHY_STS_HDR_W0_VALID BIT(7)
#define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8)
#define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24)
@@ -449,6 +450,13 @@ struct rtw89_phy_sts_hdr {
#define RTW89_PHY_STS_HDR_W1_RSSI_C GENMASK(23, 16)
#define RTW89_PHY_STS_HDR_W1_RSSI_D GENMASK(31, 24)
+struct rtw89_phy_sts_hdr_v2 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_PHY_STS_HDR_V2_W0_PATH_EN GENMASK(20, 16)
+
struct rtw89_phy_sts_iehdr {
__le32 w0;
};
@@ -552,13 +560,43 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
-struct rtw89_phy_sts_ie0 {
+struct rtw89_phy_sts_ie00 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+
+struct rtw89_phy_sts_ie00_v2 {
__le32 w0;
__le32 w1;
__le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A GENMASK(8, 0)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B GENMASK(17, 9)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C GENMASK(26, 18)
+#define RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D GENMASK(8, 0)
+
+struct rtw89_phy_sts_ie01 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
} __packed;
#define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD GENMASK(15, 8)
+#define RTW89_PHY_STS_IE01_W0_RX_PATH_EN GENMASK(31, 28)
#define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8)
#define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20)
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
@@ -567,6 +605,25 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
+struct rtw89_phy_sts_ie01_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+} __packed;
+
+#define RTW89_PHY_STS_IE01_V2_W5_BW_IDX GENMASK(31, 29)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D GENMASK(23, 16)
+
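/* Hedged decode sketch: these descriptor words are read with the usual
 * le32_get_bits() accessors, e.g. pulling the BW index and the path-A
 * frequency-domain RPL out of an ie01_v2 payload (buf assumed): */
const struct rtw89_phy_sts_ie01_v2 *ie = buf;
u8 bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
u8 rpl_fd_a = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);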
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
RTW89_TXCH_ACH1 = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 0f0f4beec4d9..86e24e07780d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -1438,6 +1438,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int interval = rtw_wow->nd_config->scan_plans[0].interval;
struct rtw89_scan_option opt = {};
int ret;
@@ -1457,7 +1458,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
opt.enable = enable;
opt.repeat = RTW89_SCAN_NORMAL;
- opt.norm_pd = 10; /* in unit of 100ms */
+ opt.norm_pd = max(interval, 1) * 10; /* in unit of 100ms */
opt.delay = max(rtw_wow->nd_config->delay, 1);
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
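/* Hedged worked example for the norm_pd change above: nl80211 scan-plan
 * intervals are in seconds while norm_pd counts 100 ms units, so a
 * 30-second plan yields max(30, 1) * 10 == 300, where the old hardcoded
 * 10 always meant one second: */
int interval_sec = 30;			/* from nd_config->scan_plans[0] */
u16 norm_pd = max(interval_sec, 1) * 10;	/* 300 -> 30 s */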
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
index a6a28640ad40..bbc1200dbb62 100644
--- a/drivers/net/wireless/rsi/rsi_debugfs.h
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -39,7 +39,6 @@ struct rsi_dbg_files {
struct rsi_debugfs {
struct dentry *subdir;
- struct rsi_dbg_ops *dfs_get_ops;
struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
};
int rsi_init_dbgfs(struct rsi_hw *adapter);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 34d95f458e1a..a9f090e15cbb 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -142,7 +142,7 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_radar_type_decode(mbox->radar_type));
if (!wl->radar_debug_mode)
- ieee80211_radar_detected(wl->hw);
+ ieee80211_radar_detected(wl->hw, NULL);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 5fe9e4e26142..f0e528abb1b4 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -1146,7 +1146,7 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
{
struct mac80211_hwsim_data *data = dat;
- ieee80211_radar_detected(data->hw);
+ ieee80211_radar_detected(data->hw, NULL);
return 0;
}
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index e6f7d2bf8dde..14a23d3a27f2 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -562,12 +562,8 @@ static int ines_ts_info(struct mii_timestamper *mii_ts,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
-
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 234ad6f16e92..b6f8e3834bd3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -80,23 +80,15 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
- MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
-};
-
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
- MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
- MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
- (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
-};
-
-enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
@@ -112,6 +104,16 @@ enum {
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -313,6 +315,7 @@ enum {
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
MLX5_CMD_OP_MAX
};
@@ -485,7 +488,13 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_66[0x2];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
- u8 reserved_at_6a[0xe];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -522,7 +531,15 @@ struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
@@ -911,7 +928,9 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_8[0x5];
u8 fdb_uplink_hairpin[0x1];
u8 fdb_multi_path_any_table_limit_regc[0x1];
- u8 reserved_at_f[0x3];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
u8 fdb_multi_path_any_table[0x1];
u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
@@ -950,6 +969,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_1900[0x6700];
};
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
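/* Hedged sketch: mlx5_ifc bit layouts like the one above are consumed
 * through the MLX5_GET() accessors keyed on struct and field name, e.g.
 * (hca_cap being a queried capability buffer, assumed here):
 */
u16 vport;

if (MLX5_GET(esw_cap, hca_cap, esw_manager_vport_number_valid))
	vport = MLX5_GET(esw_cap, hca_cap, esw_manager_vport_number);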
enum {
MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
MLX5_COUNTER_FLOW_ESWITCH = 0x1,
@@ -1443,9 +1529,13 @@ enum {
};
enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1650,7 +1740,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pci_sync_for_fw_update_event[0x1];
u8 reserved_at_1f2[0x6];
u8 init2_lag_tx_port_affinity[0x1];
- u8 reserved_at_1fa[0x3];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1959,7 +2050,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_760[0x3];
u8 log_max_num_header_modify_argument[0x5];
- u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity_offset[0x4];
u8 log_header_modify_argument_granularity[0x4];
u8 reserved_at_770[0x3];
u8 log_header_modify_argument_max_alloc[0x5];
@@ -2006,7 +2097,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
- u8 reserved_at_1a8[0x3];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
@@ -2022,6 +2114,16 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_250[0x10];
u8 reserved_at_260[0x120];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
u8 reserved_at_380[0xb];
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
@@ -2037,9 +2139,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_400[0x1];
u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
- u8 reserved_at_402[0x1e];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
- u8 reserved_at_420[0x20];
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
u8 reserved_at_440[0x8];
u8 max_num_eqs_24b[0x18];
@@ -2086,7 +2190,7 @@ struct mlx5_ifc_extended_dest_format_bits {
u8 reserved_at_60[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
};
@@ -2178,7 +2282,10 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x80];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_offset[0x40];
u8 headers_mkey[0x20];
@@ -3562,6 +3669,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
@@ -3678,7 +3787,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3919,7 +4028,8 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0xb];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
@@ -4961,6 +5071,16 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6127,7 +6247,8 @@ struct mlx5_ifc_flow_table_context_bits {
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_10[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
@@ -6137,11 +6258,21 @@ struct mlx5_ifc_flow_table_context_bits {
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
+ union {
+ struct {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+ } sws;
+ struct {
+ u8 rtc_id_0[0x20];
- u8 sw_owner_icm_root_1[0x40];
+ u8 rtc_id_1[0x20];
- u8 sw_owner_icm_root_0[0x40];
+ u8 reserved_at_100[0x40];
+ } hws;
+ };
};
struct mlx5_ifc_query_flow_table_out_bits {
@@ -8923,7 +9054,9 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x60];
+ u8 wq_umem_offset[0x40];
+
+ u8 wq_umem_id[0x20];
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index ad1ce650146c..fc7eeff99a8a 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -149,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 44d1dbb54ffe..2465bdb6037f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3085,8 +3085,6 @@ void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 2381e07429a2..5c01048860c4 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -598,6 +598,8 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
const struct fwnode_handle *fwnode,
u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_set_fixed_link(struct phylink *,
+ const struct phylink_link_state *);
void phylink_mac_change(struct phylink *, bool up);
void phylink_pcs_change(struct phylink_pcs *, bool up);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cf8f6ce06742..5803eb8a157d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1433,6 +1433,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
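/* Hedged usage sketch for the new helper: it copies `len` bytes at
 * `offset` out of an skb sequence into a flat buffer (assumed to return
 * 0 on success and a negative errno on short data):
 */
struct skb_seq_state st;
u8 hdr[8];

skb_prepare_seq_read(skb, 0, skb->len, &st);
if (skb_copy_seq_read(&st, 0, hdr, sizeof(hdr)) < 0)
	goto short_pkt;	/* hypothetical error label */
skb_abort_seq_read(&st);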
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 338991c08f00..d79ff252cfdc 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -138,33 +138,6 @@ struct stmmac_txq_cfg {
int tbs_en;
};
-/* FPE link state */
-enum stmmac_fpe_state {
- FPE_STATE_OFF = 0,
- FPE_STATE_CAPABLE = 1,
- FPE_STATE_ENTERING_ON = 2,
- FPE_STATE_ON = 3,
-};
-
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
-enum stmmac_fpe_task_state_t {
- __FPE_REMOVING,
- __FPE_TASK_SCHED,
-};
-
-struct stmmac_fpe_cfg {
- bool enable; /* FPE enable */
- bool hs_enable; /* FPE handshake enable */
- enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
- enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-};
-
struct stmmac_safety_feature_cfg {
u32 tsoee;
u32 mrxpee;
@@ -232,7 +205,6 @@ struct plat_stmmacenet_data {
struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_fpe_cfg *fpe_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 192d72c8b465..69ec1eb41a09 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4837,9 +4837,9 @@ struct cfg80211_ops {
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms);
+ u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
- struct net_device *dev);
+ struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -6194,9 +6194,6 @@ enum ieee80211_ap_reg_power {
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
- * @cac_started: true if DFS channel availability check has been started
- * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
- * @cac_time_ms: CAC time in ms
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -6220,6 +6217,11 @@ enum ieee80211_ap_reg_power {
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
* @ap and @client for each link
+ * @links.cac_started: true if DFS channel availability check has been
+ * started
+ * @links.cac_start_time: timestamp (jiffies) when the dfs state was
+ * entered.
+ * @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
*/
struct wireless_dev {
@@ -6261,11 +6263,6 @@ struct wireless_dev {
u32 owner_nlportid;
bool nl_owner_dead;
- /* FIXME: need to rework radar detection for MLO */
- bool cac_started;
- unsigned long cac_start_time;
- unsigned int cac_time_ms;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -6332,6 +6329,10 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss;
} client;
};
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
};
@@ -8740,6 +8741,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
+ * @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
@@ -8747,7 +8749,8 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id);
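/* Hedged driver-side sketch: MLO-aware drivers now report CAC events per
 * link; non-MLO callers keep passing link_id 0: */
cfg80211_cac_event(netdev, &chandef, NL80211_RADAR_CAC_FINISHED,
		   GFP_KERNEL, 0);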
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index adfec877f392..954dff901b69 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6748,8 +6748,11 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @chanctx_conf: Channel context on which radar is detected. Mandatory to
+ * pass a valid pointer during MLO. For non-MLO, %NULL can be passed.
*/
-void ieee80211_radar_detected(struct ieee80211_hw *hw);
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf);
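/* Hedged caller sketch, matching the hwsim/wl18xx updates earlier in this
 * patch: non-MLO drivers simply pass a NULL chanctx: */
ieee80211_radar_detected(hw, NULL);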
/**
* ieee80211_chswitch_done - Complete channel switch process
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 0bc4ab03f487..814b5f2e3ed5 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -223,6 +223,8 @@ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
return htonl(0u);
}
+
+void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else
static inline void mptcp_init(void)
@@ -307,6 +309,8 @@ static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct reques
}
static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+
+static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 54cef89f6c1e..b6bfdc6416c7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -67,27 +67,27 @@
- instance of a transformer, struct xfrm_state (=SA)
- template to clone xfrm_state, struct xfrm_tmpl
- SPD is plain linear list of xfrm_policy rules, ordered by priority.
+ SPD is organized as hash table (for policies that meet minimum address prefix
+ length setting, net->xfrm.policy_hthresh). Other policies are stored in
+ lists, sorted into an rbtree ordered by destination and source address networks.
+ See net/xfrm/xfrm_policy.c for details.
+
(To be compatible with existing pfkeyv2 implementations,
many rules with priority of 0x7fffffff are allowed to exist and
such rules are ordered in an unpredictable way, thanks to bsd folks.)
- Lookup is plain linear search until the first match with selector.
-
If "action" is "block", then we prohibit the flow, otherwise:
if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
policy entry has list of up to XFRM_MAX_DEPTH transformations,
described by templates xfrm_tmpl. Each template is resolved
to a complete xfrm_state (see below) and we pack bundle of transformations
- to a dst_entry returned to requestor.
+ to a dst_entry returned to requester.
dst -. xfrm .-> xfrm_state #1
|---. child .-> dst -. xfrm .-> xfrm_state #2
|---. child .-> dst -. xfrm .-> xfrm_state #3
|---. child .-> NULL
- Bundles are cached at xrfm_policy struct (field ->bundles).
-
Resolution of xrfm_tmpl
-----------------------
@@ -526,6 +526,36 @@ struct xfrm_policy_queue {
unsigned long timeout;
};
+/**
+ * struct xfrm_policy - xfrm policy
+ * @xp_net: network namespace the policy lives in
+ * @bydst: hlist node for SPD hash table or rbtree list
+ * @byidx: hlist node for index hash table
+ * @lock: serialize changes to policy structure members
+ * @refcnt: reference count, freed once it reaches 0
+ * @pos: kernel internal tie-breaker to determine age of policy
+ * @timer: timer
+ * @genid: generation, used to invalidate old policies
+ * @priority: priority, set by userspace
+ * @index: policy index (autogenerated)
+ * @if_id: virtual xfrm interface id
+ * @mark: packet mark
+ * @selector: selector
+ * @lft: lifetime configuration data
+ * @curlft: lifetime state
+ * @walk: list head on pernet policy list
+ * @polq: queue to hold packets while acquire operation is in progress
+ * @bydst_reinsert: policy tree node needs to be merged
+ * @type: XFRM_POLICY_TYPE_MAIN or _SUB
+ * @action: XFRM_POLICY_ALLOW or _BLOCK
+ * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
+ * @xfrm_nr: number of used templates in @xfrm_vec
+ * @family: protocol family
+ * @security: SELinux security label
+ * @xfrm_vec: array of templates to resolve state
+ * @rcu: rcu head, used to defer memory release
+ * @xdo: hardware offload state
+ */
struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
@@ -555,7 +585,6 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
- struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
@@ -1016,7 +1045,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
- u32 if_id; /* interface identifyer */
+ u32 if_id; /* interface identifier */
bool collect_md;
};
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index a2c66b3d7f0f..858339d1c1c4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -32,8 +32,9 @@ enum {
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16),
+ SOF_TIMESTAMPING_OPT_RX_FILTER = (1 << 17),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_ID_TCP,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_RX_FILTER,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
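A hedged userspace sketch of the new option (fd is an assumed connected socket; error handling omitted). With SOF_TIMESTAMPING_OPT_RX_FILTER set, an RX timestamp is only reported when the matching RX generation flag is also enabled:

#include <linux/net_tstamp.h>
#include <sys/socket.h>

/* Enable software RX timestamp reporting only; hardware RX timestamps are
 * suppressed because SOF_TIMESTAMPING_RX_HARDWARE is not requested.
 */
static int enable_rx_sw_tstamps(int fd)
{
	int val = SOF_TIMESTAMPING_RX_SOFTWARE |
		  SOF_TIMESTAMPING_SOFTWARE |
		  SOF_TIMESTAMPING_OPT_RX_FILTER;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}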
diff --git a/net/core/dev.c b/net/core/dev.c
index 22c3f14d9287..8f4dead64284 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4247,13 +4247,6 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_zero);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
-}
-EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a52638363ea5..038a059b5924 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4411,6 +4411,41 @@ void skb_abort_seq_read(struct skb_seq_state *st)
}
EXPORT_SYMBOL(skb_abort_seq_read);
+/**
+ * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
+ * @st: source skb_seq_state
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @offset bytes into the source @st to the destination
+ * buffer @to. @offset should increase (or stay unchanged) with each
+ * subsequent call to this function. If @offset needs to decrease from the
+ * previous call, @st should be reset first.
+ *
+ * Return: 0 on success or -EINVAL if the copy ended early
+ */
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
+{
+ const u8 *data;
+ u32 sqlen;
+
+ for (;;) {
+ sqlen = skb_seq_read(offset, &data, st);
+ if (sqlen == 0)
+ return -EINVAL;
+ if (sqlen >= len) {
+ memcpy(to, data, len);
+ return 0;
+ }
+ memcpy(to, data, sqlen);
+ to += sqlen;
+ offset += sqlen;
+ len -= sqlen;
+ }
+}
+EXPORT_SYMBOL(skb_copy_seq_read);
+
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
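A hedged usage sketch for the new helper (struct my_hdr and the surrounding call site are hypothetical, not part of this patch):

/* Copy a fixed-size header located at hdr_off out of a possibly
 * non-linear skb using the sequential read state.
 */
static int example_read_hdr(struct sk_buff *skb, int hdr_off,
			    struct my_hdr *hdr)
{
	struct skb_seq_state st;
	int err;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	err = skb_copy_seq_read(&st, hdr_off, hdr, sizeof(*hdr));
	skb_abort_seq_read(&st);	/* release any mapped fragment */
	return err;
}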
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 1f46de394f2e..281bbac5539d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -178,8 +178,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
@@ -312,7 +313,7 @@ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
+ unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
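Since the new egress-port mask starts at bit 0, the open-coded AND could equivalently use the bitfield helper (illustrative alternative, assuming <linux/bitfield.h> is included):

	unsigned int port = FIELD_GET(KSZ9477_TAIL_TAG_EG_PORT_M, tag[0]);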
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 781834ef57c3..6c245e59bbc1 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -427,6 +427,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
[const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
[const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
index 560dd039c662..4ef7c6e32d10 100644
--- a/net/ethtool/phy.c
+++ b/net/ethtool/phy.c
@@ -164,7 +164,7 @@ int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_rtnl;
/* No PHY, return early */
- if (!req_info.pdn->phy)
+ if (!req_info.pdn)
goto err_unlock_rtnl;
ret = ethnl_phy_reply_size(&req_info.base, info->extack);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index a06e790042e2..10393836992d 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -625,7 +625,6 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
/* Overflow soon to find bugs easier: */
hsr->sequence_nr = HSR_SEQNR_START;
hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
- hsr->interlink_sequence_nr = HSR_SEQNR_START;
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index ab1f8d35d9dc..fcfeb79bb040 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -203,7 +203,6 @@ struct hsr_priv {
struct timer_list prune_proxy_timer;
int announce_count;
u16 sequence_nr;
- u16 interlink_sequence_nr; /* Interlink port seq_nr */
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */
spinlock_t seqnr_lock; /* locking for sequence_nr */
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index af6cf64a00e0..464f683e016d 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -67,7 +67,16 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
skb_reset_mac_len(skb);
- hsr_forward_skb(skb, port);
+ /* Only frames received over the interlink port are assigned a
+ * sequence number and hence require synchronisation against other
+ * senders.
+ */
+ if (port->type == HSR_PT_INTERLINK) {
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, port);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ hsr_forward_skb(skb, port);
+ }
finish_consume:
return RX_HANDLER_CONSUMED;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8a5680b4e786..e359a9161445 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2235,6 +2235,7 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss)
{
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
bool has_timestamping = false;
if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
@@ -2274,14 +2275,18 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
}
}
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[2] = (struct timespec64) {0};
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 86169127e4d1..79064580c8c0 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -282,6 +282,7 @@ static int tcp_write_timeout(struct sock *sk)
expired = retransmits_timed_out(sk, retry_until,
READ_ONCE(icsk->icsk_user_timeout));
tcp_fastopen_active_detect_blackhole(sk, expired);
+ mptcp_active_detect_blackhole(sk, expired);
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index fdf8b658fede..c61df637232a 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -55,10 +55,21 @@
#define HE_DURATION_S(shift, streams, gi, bps) \
(HE_DURATION(streams, gi, bps) >> shift)
+/* The GI values in HE and EHT are identical, and match enum nl80211_eht_gi as well */
+#define EHT_GI_08 HE_GI_08
+#define EHT_GI_16 HE_GI_16
+#define EHT_GI_32 HE_GI_32
+
+#define EHT_DURATION(streams, gi, bps) \
+ HE_DURATION(streams, gi, bps)
+#define EHT_DURATION_S(shift, streams, gi, bps) \
+ HE_DURATION_S(shift, streams, gi, bps)
+
#define BW_20 0
#define BW_40 1
#define BW_80 2
#define BW_160 3
+#define BW_320 4
/*
* Define group sort order: HT40 -> SGI -> #streams
@@ -68,17 +79,26 @@
#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
#define IEEE80211_HE_MAX_STREAMS 8
+#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */
+
+#define IEEE80211_EHT_MAX_STREAMS 8
+#define IEEE80211_EHT_STREAM_GROUPS 15 /* BW(=5) * GI(=3) */
#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_HT_STREAM_GROUPS)
#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_VHT_STREAM_GROUPS)
+#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \
+ IEEE80211_HE_STREAM_GROUPS)
+#define IEEE80211_EHT_GROUPS_NB (IEEE80211_EHT_MAX_STREAMS * \
+ IEEE80211_EHT_STREAM_GROUPS)
#define IEEE80211_HT_GROUP_0 0
#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB)
+#define IEEE80211_EHT_GROUP_0 (IEEE80211_HE_GROUP_0 + IEEE80211_HE_GROUPS_NB)
-#define MCS_GROUP_RATES 12
+#define MCS_GROUP_RATES 14
#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
IEEE80211_HT_GROUP_0 + \
@@ -203,6 +223,69 @@
#define HE_GROUP(_streams, _gi, _bw) \
__HE_GROUP(_streams, _gi, _bw, \
HE_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_BW2VBPS(_bw, r5, r4, r3, r2, r1) \
+ ((_bw) == BW_320 ? r5 : BW2VBPS(_bw, r4, r3, r2, r1))
+
+#define EHT_GROUP_IDX(_streams, _gi, _bw) \
+ (IEEE80211_EHT_GROUP_0 + \
+ IEEE80211_EHT_MAX_STREAMS * 3 * (_bw) + \
+ IEEE80211_EHT_MAX_STREAMS * (_gi) + \
+ (_streams) - 1)
+
+#define __EHT_GROUP(_streams, _gi, _bw, _s) \
+ [EHT_GROUP_IDX(_streams, _gi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 3920, 1960, 980, 468, 234)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 5880, 2937, 1470, 702, 351)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 7840, 3920, 1960, 936, 468)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 11760, 5880, 2940, 1404, 702)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 15680, 7840, 3920, 1872, 936)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 17640, 8820, 4410, 2106, 1053)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 19600, 9800, 4900, 2340, 1170)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 23520, 11760, 5880, 2808, 1404)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 26133, 13066, 6533, 3120, 1560)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 29400, 14700, 7350, 3510, 1755)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 32666, 16333, 8166, 3900, 1950)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 35280, 17640, 8820, 4212, 2106)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 39200, 19600, 9800, 4680, 2340)) \
+ } \
+}
+
+#define EHT_GROUP_SHIFT(_streams, _gi, _bw) \
+ GROUP_SHIFT(EHT_DURATION(_streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)))
+
+#define EHT_GROUP(_streams, _gi, _bw) \
+ __EHT_GROUP(_streams, _gi, _bw, \
+ EHT_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_GROUP_RANGE(_gi, _bw) \
+ EHT_GROUP(1, _gi, _bw), \
+ EHT_GROUP(2, _gi, _bw), \
+ EHT_GROUP(3, _gi, _bw), \
+ EHT_GROUP(4, _gi, _bw), \
+ EHT_GROUP(5, _gi, _bw), \
+ EHT_GROUP(6, _gi, _bw), \
+ EHT_GROUP(7, _gi, _bw), \
+ EHT_GROUP(8, _gi, _bw)
+
struct mcs_group {
u8 shift;
u16 duration[MCS_GROUP_RATES];
@@ -376,6 +459,26 @@ static const struct mcs_group airtime_mcs_groups[] = {
HE_GROUP(6, HE_GI_32, BW_160),
HE_GROUP(7, HE_GI_32, BW_160),
HE_GROUP(8, HE_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_20),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_40),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_80),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_320),
};
static u32
@@ -422,6 +525,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
case RATE_INFO_BW_160:
bw = BW_160;
break;
+ case RATE_INFO_BW_320:
+ bw = BW_320;
+ break;
default:
WARN_ON_ONCE(1);
return 0;
@@ -443,14 +549,27 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
idx = status->rate_idx;
group = HE_GROUP_IDX(streams, status->he_gi, bw);
break;
+ case RX_ENC_EHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = EHT_GROUP_IDX(streams, status->eht.gi, bw);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
- if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) ||
- (status->encoding == RX_ENC_HE && streams > 8)))
- return 0;
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ case RX_ENC_HE:
+ if (WARN_ON_ONCE(streams > 8))
+ return 0;
+ break;
+ default:
+ if (WARN_ON_ONCE(streams > 4))
+ return 0;
+ break;
+ }
if (idx >= MCS_GROUP_RATES)
return 0;
@@ -517,7 +636,9 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
stat->nss = ri->nss;
stat->rate_idx = ri->mcs;
- if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ if (ri->flags & RATE_INFO_FLAGS_EHT_MCS)
+ stat->encoding = RX_ENC_EHT;
+ else if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
stat->encoding = RX_ENC_HE;
else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
stat->encoding = RX_ENC_VHT;
@@ -529,7 +650,14 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat->he_gi = ri->he_gi;
+ switch (stat->encoding) {
+ case RX_ENC_EHT:
+ stat->eht.gi = ri->eht_gi;
+ break;
+ default:
+ stat->he_gi = ri->he_gi;
+ break;
+ }
if (stat->encoding != RX_ENC_LEGACY)
return true;
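A worked instance of the EHT index arithmetic above (illustrative; EHT_GI_16 == 1 and BW_320 == 4 per the definitions in this hunk):

/*
 * EHT_GROUP_IDX(2, EHT_GI_16, BW_320)
 *   == IEEE80211_EHT_GROUP_0 + 8 * 3 * 4 + 8 * 1 + (2 - 1)
 *   == IEEE80211_EHT_GROUP_0 + 105
 *
 * i.e. two spatial streams with 1.6us GI at 320 MHz land 105 entries
 * into the EHT block of airtime_mcs_groups[].
 */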
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b02b84ce2130..847304a3a29a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1662,12 +1662,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BEACON_ENABLED);
- if (sdata->wdev.cac_started) {
+ if (sdata->wdev.links[link_id].cac_started) {
chandef = link_conf->chanreq.oper;
- wiphy_delayed_work_cancel(wiphy, &sdata->dfs_cac_timer_work);
+ wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
drv_stop_ap(sdata->local, sdata, link_conf);
@@ -3462,51 +3462,58 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning) {
- err = -EBUSY;
- goto out_unlock;
- }
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ return -ENOLINK;
/* whatever, but channel contexts should not complain about that one */
- sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
- sdata->deflink.needed_rx_chains = local->rx_chains;
+ link_data->smps_mode = IEEE80211_SMPS_OFF;
+ link_data->needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ err = ieee80211_link_use_channel(link_data, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
- goto out_unlock;
+ return err;
- wiphy_delayed_work_queue(wiphy, &sdata->dfs_cac_timer_work,
+ wiphy_delayed_work_queue(wiphy, &link_data->dfs_cac_timer_work,
msecs_to_jiffies(cac_time_ms));
- out_unlock:
- return err;
+ return 0;
}
static void ieee80211_end_cac(struct wiphy *wiphy,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ continue;
+
wiphy_delayed_work_cancel(wiphy,
- &sdata->dfs_cac_timer_work);
+ &link_data->dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(&sdata->deflink);
- sdata->wdev.cac_started = false;
+ if (sdata->wdev.links[link_id].cac_started) {
+ ieee80211_link_release_channel(link_data);
+ sdata->wdev.links[link_id].cac_started = false;
}
}
}
@@ -3961,7 +3968,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (!list_empty(&local->roc_list) || local->scanning)
return -EBUSY;
- if (sdata->wdev.cac_started)
+ if (sdata->wdev.links[link_id].cac_started)
return -EBUSY;
if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index b72e4036526b..cca6d14084d2 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -683,6 +683,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
ctx->mode = mode;
ctx->conf.radar_enabled = false;
ctx->conf.radio_idx = radio_idx;
+ ctx->radar_detected = false;
_ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
return ctx;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6305c4e9cca1..4f0390918b60 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -893,6 +893,8 @@ struct ieee80211_chanctx {
struct ieee80211_chan_req req;
struct ieee80211_chanctx_conf conf;
+
+ bool radar_detected;
};
struct mac80211_qos_map {
@@ -1067,6 +1069,7 @@ struct ieee80211_link_data {
int ap_power_level; /* in dBm */
bool radar_required;
+ struct wiphy_delayed_work dfs_cac_timer_work;
union {
struct ieee80211_link_data_managed mgd;
@@ -1165,8 +1168,6 @@ struct ieee80211_sub_if_data {
struct ieee80211_link_data deflink;
struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
- struct wiphy_delayed_work dfs_cac_timer_work;
-
/* for ieee80211_set_active_links_async() */
struct wiphy_work activate_links_work;
u16 desired_active_links;
@@ -2650,7 +2651,8 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *chanctx);
void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
struct wiphy_work *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b4ad66af3af3..6ef0990d3d29 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -462,6 +462,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
{
struct ieee80211_local *local = sdata->local;
unsigned long flags;
+ struct sk_buff_head freeq;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i, flushed;
@@ -550,15 +551,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
wiphy_work_cancel(local->hw.wiphy,
&sdata->deflink.color_change_finalize_work);
wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->dfs_cac_timer_work);
+ &sdata->deflink.dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
+ if (sdata->wdev.links[0].cac_started) {
chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -637,18 +638,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
skb_queue_purge(&sdata->status_queue);
}
+ /*
+ * Since ieee80211_free_txskb() may issue __dev_queue_xmit(),
+ * which should be called with interrupts enabled, reclamation
+ * is done in two phases:
+ */
+ __skb_queue_head_init(&freeq);
+
+ /* unlink from local queues... */
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_walk_safe(&local->pending[i], skb, tmp) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
- ieee80211_free_txskb(&local->hw, skb);
+ __skb_queue_tail(&freeq, skb);
}
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ /* ... and perform actual reclamation with interrupts enabled. */
+ skb_queue_walk_safe(&freeq, skb, tmp) {
+ __skb_unlink(skb, &freeq);
+ ieee80211_free_txskb(&local->hw, skb);
+ }
+
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ieee80211_txq_remove_vlan(local, sdata);
@@ -1729,8 +1744,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
wiphy_work_init(&sdata->work, ieee80211_iface_work);
wiphy_work_init(&sdata->activate_links_work,
ieee80211_activate_links_work);
- wiphy_delayed_work_init(&sdata->dfs_cac_timer_work,
- ieee80211_dfs_cac_timer_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index 1a211b8d4057..0bbac64d5fa0 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -45,6 +45,8 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_color_collision_detection_work);
INIT_LIST_HEAD(&link->assigned_chanctx_list);
INIT_LIST_HEAD(&link->reserved_chanctx_list);
+ wiphy_delayed_work_init(&link->dfs_cac_timer_work,
+ ieee80211_dfs_cac_timer_work);
if (!deflink) {
switch (sdata->vif.type) {
@@ -75,6 +77,16 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
&link->color_change_finalize_work);
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->csa.finalize_work);
+
+ if (link->sdata->wdev.links[link->link_id].cac_started) {
+ wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+ cfg80211_cac_event(link->sdata->dev,
+ &link->conf->chanreq.oper,
+ NL80211_RADAR_CAC_ABORTED,
+ GFP_KERNEL, link->link_id);
+ }
+
ieee80211_link_release_channel(link);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 746f51ac0306..735e78adb0db 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3031,18 +3031,19 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t)
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
+ struct ieee80211_link_data *link =
+ container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chanreq.oper;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
+ struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(&sdata->deflink);
+ if (sdata->wdev.links[link->link_id].cac_started) {
+ ieee80211_link_release_channel(link);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, link->link_id);
}
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d823d58303e8..7be52345f218 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -32,7 +32,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_scan_cancel(local);
- ieee80211_dfs_cac_cancel(local);
+ ieee80211_dfs_cac_cancel(local, NULL);
ieee80211_roc_purge(local, NULL);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 14e18fd9e919..adb88c06b598 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -575,6 +575,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -585,8 +586,9 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
return false;
list_for_each_entry(sdata_iter, &local->interfaces, list) {
- if (sdata_iter->wdev.cac_started)
- return false;
+ for_each_valid_link(&sdata_iter->wdev, link_id)
+ if (sdata_iter->wdev.links[link_id].cac_started)
+ return false;
}
return true;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3b9468dc3029..f94faa86ba8a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3467,24 +3467,44 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
return ts;
}
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+/* Cancel CAC for the interfaces under the specified @local. If @ctx is
+ * also provided, only the interfaces using that ctx will be canceled.
+ */
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->dfs_cac_timer_work);
-
- if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chanreq.oper;
- ieee80211_link_release_channel(&sdata->deflink);
- cfg80211_cac_event(sdata->dev,
- &chandef,
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ link = sdata_dereference(sdata->link[link_id],
+ sdata);
+ if (!link)
+ continue;
+
+ chanctx_conf = sdata_dereference(link->conf->chanctx_conf,
+ sdata);
+ if (ctx && &ctx->conf != chanctx_conf)
+ continue;
+
+ wiphy_delayed_work_cancel(local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+
+ if (!sdata->wdev.links[link_id].cac_started)
+ continue;
+
+ chandef = link->conf->chanreq.oper;
+ ieee80211_link_release_channel(link);
+ cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
}
}
@@ -3494,9 +3514,8 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, radar_detected_work);
- struct cfg80211_chan_def chandef = local->hw.conf.chandef;
+ struct cfg80211_chan_def chandef;
struct ieee80211_chanctx *ctx;
- int num_chanctx = 0;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3504,25 +3523,46 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- num_chanctx++;
+ if (!ctx->radar_detected)
+ continue;
+
+ ctx->radar_detected = false;
+
chandef = ctx->conf.def;
+
+ ieee80211_dfs_cac_cancel(local, ctx);
+ cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
}
+}
- ieee80211_dfs_cac_cancel(local);
+static void
+ieee80211_radar_mark_chan_ctx_iterator(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data)
+{
+ struct ieee80211_chanctx *ctx =
+ container_of(chanctx_conf, struct ieee80211_chanctx,
+ conf);
- if (num_chanctx > 1)
- /* XXX: multi-channel is not supported yet */
- WARN_ON(1);
- else
- cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
+ if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
+ return;
+
+ if (data && data != chanctx_conf)
+ return;
+
+ ctx->radar_detected = true;
}
-void ieee80211_radar_detected(struct ieee80211_hw *hw)
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_radar_detected(local);
+ ieee80211_iter_chan_contexts_atomic(hw, ieee80211_radar_mark_chan_ctx_iterator,
+ chanctx_conf);
+
wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
}
EXPORT_SYMBOL(ieee80211_radar_detected);
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 99382c317ebb..38d8121331d4 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -12,6 +12,7 @@
#include <net/netns/generic.h>
#include "protocol.h"
+#include "mib.h"
#define MPTCP_SYSCTL_PATH "net/mptcp"
@@ -27,8 +28,11 @@ struct mptcp_pernet {
#endif
unsigned int add_addr_timeout;
+ unsigned int blackhole_timeout;
unsigned int close_timeout;
unsigned int stale_loss_cnt;
+ atomic_t active_disable_times;
+ unsigned long active_disable_stamp;
u8 mptcp_enabled;
u8 checksum_enabled;
u8 allow_join_initial_addr_port;
@@ -87,6 +91,8 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
pernet->add_addr_timeout = TCP_RTO_MAX;
+ pernet->blackhole_timeout = 3600;
+ atomic_set(&pernet->active_disable_times, 0);
pernet->close_timeout = TCP_TIMEWAIT_LEN;
pernet->checksum_enabled = 0;
pernet->allow_join_initial_addr_port = 1;
@@ -151,6 +157,20 @@ static int proc_available_schedulers(const struct ctl_table *ctl,
return ret;
}
+static int proc_blackhole_detect_timeout(const struct ctl_table *table,
+ int write, void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct mptcp_pernet *pernet = mptcp_get_pernet(current->nsproxy->net_ns);
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ atomic_set(&pernet->active_disable_times, 0);
+
+ return ret;
+}
+
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -217,6 +237,13 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "blackhole_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_blackhole_detect_timeout,
+ .extra1 = SYSCTL_ZERO,
+ },
};
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -240,6 +267,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[6].data = &pernet->scheduler;
/* table[7] is for available_schedulers which is read-only info */
table[8].data = &pernet->close_timeout;
+ table[9].data = &pernet->blackhole_timeout;
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
ARRAY_SIZE(mptcp_sysctl_table));
@@ -277,6 +305,111 @@ static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}
#endif /* CONFIG_SYSCTL */
+/* The following code block deals with middlebox issues with MPTCP,
+ * similarly to what is done with TFO.
+ * The proposed solution is to disable active MPTCP globally when SYN+MPC is
+ * dropped while a SYN without MPC is not. In this case, active-side MPTCP is
+ * disabled globally for 1h at first. Then if it happens again, it is disabled
+ * for 2h, then 4h, 8h, ...
+ * The timeout is reset back to 1h when a successful active MPTCP connection
+ * is fully established.
+ */
+
+/* Disable active MPTCP and record current jiffies and active_disable_times */
+void mptcp_active_disable(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct mptcp_pernet *pernet;
+
+ pernet = mptcp_get_pernet(net);
+
+ if (!READ_ONCE(pernet->blackhole_timeout))
+ return;
+
+ /* Paired with READ_ONCE() in mptcp_active_should_disable() */
+ WRITE_ONCE(pernet->active_disable_stamp, jiffies);
+
+ /* Paired with smp_rmb() in mptcp_active_should_disable().
+ * We want pernet->active_disable_stamp to be updated first.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&pernet->active_disable_times);
+
+ MPTCP_INC_STATS(net, MPTCP_MIB_BLACKHOLE);
+}
+
+/* Calculate timeout for MPTCP active disable
+ * Return true if we are still in the active MPTCP disable period
+ * Return false if timeout already expired and we should use active MPTCP
+ */
+bool mptcp_active_should_disable(struct sock *ssk)
+{
+ struct net *net = sock_net(ssk);
+ unsigned int blackhole_timeout;
+ struct mptcp_pernet *pernet;
+ unsigned long timeout;
+ int disable_times;
+ int multiplier;
+
+ pernet = mptcp_get_pernet(net);
+ blackhole_timeout = READ_ONCE(pernet->blackhole_timeout);
+
+ if (!blackhole_timeout)
+ return false;
+
+ disable_times = atomic_read(&pernet->active_disable_times);
+ if (!disable_times)
+ return false;
+
+ /* Paired with smp_mb__before_atomic() in mptcp_active_disable() */
+ smp_rmb();
+
+ /* Limit timeout to max: 2^6 * initial timeout */
+ multiplier = 1 << min(disable_times - 1, 6);
+
+ /* Paired with the WRITE_ONCE() in mptcp_active_disable(). */
+ timeout = READ_ONCE(pernet->active_disable_stamp) +
+ multiplier * blackhole_timeout * HZ;
+
+ return time_before(jiffies, timeout);
+}
+
+/* Enable active MPTCP and reset active_disable_times if needed */
+void mptcp_active_enable(struct sock *sk)
+{
+ struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
+
+ if (atomic_read(&pernet->active_disable_times)) {
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
+ atomic_set(&pernet->active_disable_times, 0);
+ }
+}
+
+/* Check the number of retransmissions, and fall back to TCP if needed */
+void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
+{
+ struct mptcp_subflow_context *subflow;
+ u32 timeouts;
+
+ if (!sk_is_mptcp(ssk))
+ return;
+
+ timeouts = inet_csk(ssk)->icsk_retransmits;
+ subflow = mptcp_subflow_ctx(ssk);
+
+ if (subflow->request_mptcp && ssk->sk_state == TCP_SYN_SENT) {
+ if (timeouts == 2 || (timeouts < 2 && expired)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDROP);
+ subflow->mpc_drop = 1;
+ mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+ } else {
+ subflow->mpc_drop = 0;
+ }
+ }
+}
+
static int __net_init mptcp_net_init(struct net *net)
{
struct mptcp_pernet *pernet = mptcp_get_pernet(net);
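A hedged sketch of the resulting backoff arithmetic (the helper name is hypothetical; min() comes from <linux/minmax.h>):

/* Disable period after the n-th blackhole detection: doubles each time,
 * capped at 2^6 periods (so 64h with the default 3600s timeout).
 */
static unsigned long mptcp_disable_period_secs(unsigned int timeout_secs,
					       int disable_times)
{
	int multiplier = 1 << min(disable_times - 1, 6);

	return (unsigned long)multiplier * timeout_secs;
}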
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index ec0d461cb921..38c2efc82b94 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -15,6 +15,8 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
+ SNMP_MIB_ITEM("MPCapableSYNTXDrop", MPTCP_MIB_MPCAPABLEACTIVEDROP),
+ SNMP_MIB_ITEM("MPCapableSYNTXDisabled", MPTCP_MIB_MPCAPABLEACTIVEDISABLED),
SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
@@ -73,6 +75,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("Blackhole", MPTCP_MIB_BLACKHOLE),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index d68136f93dac..c8ffe18a8722 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -10,6 +10,8 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
+ MPTCP_MIB_MPCAPABLEACTIVEDROP, /* Client-side fallback due to an MPC drop */
+ MPTCP_MIB_MPCAPABLEACTIVEDISABLED, /* Client-side disabled due to past issues */
MPTCP_MIB_TOKENFALLBACKINIT, /* Could not init/allocate token */
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
@@ -74,6 +76,7 @@ enum linux_mptcp_mib_field {
*/
MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
+ MPTCP_MIB_BLACKHOLE, /* A blackhole has been detected */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 37ebcb7640eb..c2317919fc14 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3717,13 +3717,6 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
return 0;
}
-static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
-{
- subflow->request_mptcp = 0;
- __mptcp_do_fallback(msk);
-}
-
static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct mptcp_subflow_context *subflow;
@@ -3744,9 +3737,14 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
mptcp_subflow_early_fallback(msk, subflow);
#endif
- if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
- mptcp_subflow_early_fallback(msk, subflow);
+ if (subflow->request_mptcp) {
+ if (mptcp_active_should_disable(sk)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
+ mptcp_subflow_early_fallback(msk, subflow);
+ } else if (mptcp_token_new_connect(ssk) < 0) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
+ mptcp_subflow_early_fallback(msk, subflow);
+ }
}
WRITE_ONCE(msk->write_seq, subflow->idsn);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index bf03bff9ac44..74417aae08d0 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -531,7 +531,8 @@ struct mptcp_subflow_context {
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
close_event_done : 1, /* has done the post-closed part */
- __unused : 9;
+ mpc_drop : 1, /* the MPC option has been dropped in a rtx */
+ __unused : 8;
bool data_avail;
bool scheduled;
u32 remote_nonce;
@@ -697,6 +698,11 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
+
+void mptcp_active_disable(struct sock *sk);
+bool mptcp_active_should_disable(struct sock *ssk);
+void mptcp_active_enable(struct sock *sk);
+
void mptcp_get_available_schedulers(char *buf, size_t maxlen);
void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
struct mptcp_subflow_context *subflow,
@@ -1215,6 +1221,14 @@ static inline void mptcp_do_fallback(struct sock *ssk)
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ pr_fallback(msk);
+ subflow->request_mptcp = 0;
+ __mptcp_do_fallback(msk);
+}
+
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
{
struct mptcp_ext *mpext;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index b9b14e75e8c2..1040b3b9696b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -546,6 +546,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_capable = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
+ mptcp_active_enable(parent);
mptcp_propagate_state(parent, sk, subflow, &mp_opt);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -591,6 +592,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
}
} else if (mptcp_check_fallback(sk)) {
+ /* It looks like MPTCP is blocked, while TCP is not */
+ if (subflow->mpc_drop)
+ mptcp_active_disable(parent);
fallback:
mptcp_propagate_state(parent, sk, subflow, NULL);
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d2f49db70523..f2f9b75008bb 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -361,8 +361,24 @@ static const u8 besteffort[] = {
static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};
+/* There is a big difference in timing between the accurate values placed in the
+ * cache and the approximations given by a single Newton step for small count
+ * values, particularly when stepping from count 1 to 2 or vice versa. Hence,
+ * these values were precomputed using eight Newton steps of the
+ * implementation below. Above 16, a single Newton step gives sufficient
+ * accuracy in either direction, given the precision stored.
+ *
+ * The magnitude of the error when stepping up to count 2 is such as to give the
+ * value that *should* have been produced at count 4.
+ */
+
#define REC_INV_SQRT_CACHE (16)
-static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
+static const u32 inv_sqrt_cache[REC_INV_SQRT_CACHE] = {
+ ~0, ~0, 3037000500, 2479700525,
+ 2147483647, 1920767767, 1753413056, 1623345051,
+ 1518500250, 1431655765, 1358187914, 1294981364,
+ 1239850263, 1191209601, 1147878294, 1108955788
+};
/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
@@ -388,47 +404,14 @@ static void cobalt_newton_step(struct cobalt_vars *vars)
static void cobalt_invsqrt(struct cobalt_vars *vars)
{
if (vars->count < REC_INV_SQRT_CACHE)
- vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
+ vars->rec_inv_sqrt = inv_sqrt_cache[vars->count];
else
cobalt_newton_step(vars);
}
-/* There is a big difference in timing between the accurate values placed in
- * the cache and the approximations given by a single Newton step for small
- * count values, particularly when stepping from count 1 to 2 or vice versa.
- * Above 16, a single Newton step gives sufficient accuracy in either
- * direction, given the precision stored.
- *
- * The magnitude of the error when stepping up to count 2 is such as to give
- * the value that *should* have been produced at count 4.
- */
-
-static void cobalt_cache_init(void)
-{
- struct cobalt_vars v;
-
- memset(&v, 0, sizeof(v));
- v.rec_inv_sqrt = ~0U;
- cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
-
- for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
-
- cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
- }
-}
-
static void cobalt_vars_init(struct cobalt_vars *vars)
{
memset(vars, 0, sizeof(*vars));
-
- if (!cobalt_rec_inv_sqrt_cache[0]) {
- cobalt_cache_init();
- cobalt_rec_inv_sqrt_cache[0] = ~0;
- }
}
/* CoDel control_law is t + interval/sqrt(count)
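The cached constants are approximately 2^32 / sqrt(count); since they were generated with eight fixed-point Newton steps, a couple of entries differ by one from the real-valued rounding (e.g. count 4 stores 2147483647 rather than 2147483648). A hedged userspace check (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Entries 2..15 of inv_sqrt_cache; entries 0 and 1 saturate to ~0. */
	for (int count = 2; count < 16; count++)
		printf("%2d: %10lld\n", count,
		       llround(4294967296.0 / sqrt(count)));
	return 0;
}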
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 2adb92b8c469..1dd362326c0a 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -887,9 +887,6 @@ int smc_pnet_net_init(struct net *net)
smc_pnet_create_pnetids_list(net);
- /* disable handshake limitation by default */
- net->smc.limit_smc_hs = 0;
-
return 0;
}
diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c
index 13f2bc092db1..2fab6456f765 100644
--- a/net/smc/smc_sysctl.c
+++ b/net/smc/smc_sysctl.c
@@ -90,6 +90,15 @@ static struct ctl_table smc_table[] = {
.extra1 = &conns_per_lgr_min,
.extra2 = &conns_per_lgr_max,
},
+ {
+ .procname = "limit_smc_hs",
+ .data = &init_net.smc.limit_smc_hs,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
};
int __net_init smc_sysctl_net_init(struct net *net)
@@ -121,6 +130,8 @@ int __net_init smc_sysctl_net_init(struct net *net)
WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init);
net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER;
net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER;
+ /* disable handshake limitation by default */
+ net->smc.limit_smc_hs = 0;
return 0;
diff --git a/net/socket.c b/net/socket.c
index 0a2bd22ec105..8d8b84fa404a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -946,11 +946,17 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
memset(&tss, 0, sizeof(tss));
tsflags = READ_ONCE(sk->sk_tsflags);
- if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
- (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if_index = 0;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a1894019ebd5..001ccc55ef0f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2654,51 +2654,52 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
int flags, int copied)
{
+ struct sk_buff *read_skb = NULL, *unread_skb = NULL;
struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb)) {
- struct sk_buff *unlinked_skb = NULL;
+ if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
+ return skb;
- spin_lock(&sk->sk_receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (!unix_skb_len(skb)) {
if (copied && (!u->oob_skb || skb == u->oob_skb)) {
skb = NULL;
} else if (flags & MSG_PEEK) {
skb = skb_peek_next(skb, &sk->sk_receive_queue);
} else {
- unlinked_skb = skb;
+ read_skb = skb;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
- __skb_unlink(unlinked_skb, &sk->sk_receive_queue);
+ __skb_unlink(read_skb, &sk->sk_receive_queue);
}
- spin_unlock(&sk->sk_receive_queue.lock);
+ if (!skb)
+ goto unlock;
+ }
- consume_skb(unlinked_skb);
- } else {
- struct sk_buff *unlinked_skb = NULL;
+ if (skb != u->oob_skb)
+ goto unlock;
- spin_lock(&sk->sk_receive_queue.lock);
+ if (copied) {
+ skb = NULL;
+ } else if (!(flags & MSG_PEEK)) {
+ WRITE_ONCE(u->oob_skb, NULL);
- if (skb == u->oob_skb) {
- if (copied) {
- skb = NULL;
- } else if (!(flags & MSG_PEEK)) {
- WRITE_ONCE(u->oob_skb, NULL);
-
- if (!sock_flag(sk, SOCK_URGINLINE)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
- unlinked_skb = skb;
- skb = skb_peek(&sk->sk_receive_queue);
- }
- } else if (!sock_flag(sk, SOCK_URGINLINE)) {
- skb = skb_peek_next(skb, &sk->sk_receive_queue);
- }
+ if (!sock_flag(sk, SOCK_URGINLINE)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ unread_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ }
- spin_unlock(&sk->sk_receive_queue.lock);
+unlock:
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ consume_skb(read_skb);
+ kfree_skb(unread_skb);
- kfree_skb(unlinked_skb);
- }
return skb;
}
#endif
@@ -3175,9 +3176,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
skb = skb_peek(&sk->sk_receive_queue);
if (skb) {
struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
+ struct sk_buff *next_skb;
+
+ next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb == oob_skb ||
- (!oob_skb && !unix_skb_len(skb)))
+ (!unix_skb_len(skb) &&
+ (!oob_skb || next_skb == oob_skb)))
answ = 1;
}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 41c8c0e3ba2e..3b3e3cd7027a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -170,6 +170,12 @@ static inline int for_each_rdev_check_rtnl(void)
if (for_each_rdev_check_rtnl()) {} else \
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+enum bss_source_type {
+ BSS_SOURCE_DIRECT = 0,
+ BSS_SOURCE_MBSSID,
+ BSS_SOURCE_STA_PROFILE,
+};
+
struct cfg80211_internal_bss {
struct list_head list;
struct list_head hidden_list;
@@ -191,6 +197,8 @@ struct cfg80211_internal_bss {
*/
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ enum bss_source_type bss_source;
+
/* must be last because of priv member */
struct cfg80211_bss pub;
};
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 34e5acff3935..1e3ed29f7cfc 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -94,7 +94,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->wiphy.mtx);
- if (wdev->cac_started)
+ if (wdev->links[0].cac_started)
return -EBUSY;
if (wdev->u.ibss.ssid_len)
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index aaca65b66af4..2c6654075ca9 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -127,7 +127,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
- if (wdev->cac_started)
+ if (wdev->links[0].cac_started)
return -EBUSY;
if (!setup->chandef.chan) {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 4052041a19ea..4dac81854721 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -1110,26 +1110,28 @@ EXPORT_SYMBOL(__cfg80211_radar_event);
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp)
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long timeout;
- /* not yet supported */
- if (wdev->valid_links)
+ if (WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id))))
return;
- trace_cfg80211_cac_event(netdev, event);
+ trace_cfg80211_cac_event(netdev, event, link_id);
- if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED))
+ if (WARN_ON(!wdev->links[link_id].cac_started &&
+ event != NL80211_RADAR_CAC_STARTED))
return;
switch (event) {
case NL80211_RADAR_CAC_FINISHED:
- timeout = wdev->cac_start_time +
- msecs_to_jiffies(wdev->cac_time_ms);
+ timeout = wdev->links[link_id].cac_start_time +
+ msecs_to_jiffies(wdev->links[link_id].cac_time_ms);
WARN_ON(!time_after_eq(jiffies, timeout));
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
memcpy(&rdev->cac_done_chandef, chandef,
@@ -1138,10 +1140,10 @@ void cfg80211_cac_event(struct net_device *netdev,
cfg80211_sched_dfs_chan_update(rdev);
fallthrough;
case NL80211_RADAR_CAC_ABORTED:
- wdev->cac_started = false;
+ wdev->links[link_id].cac_started = false;
break;
case NL80211_RADAR_CAC_STARTED:
- wdev->cac_started = true;
+ wdev->links[link_id].cac_started = true;
break;
default:
WARN_ON(1);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b368e23847dd..9ab777e0bd4d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6066,7 +6066,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->start_ap)
return -EOPNOTSUPP;
- if (wdev->cac_started)
+ if (wdev->links[link_id].cac_started)
return -EBUSY;
if (wdev->links[link_id].ap.beacon_interval)
@@ -9778,7 +9778,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
return ERR_PTR(-ENOMEM);
if (n_ssids)
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = n_ssids;
if (ie_len) {
if (n_ssids)
@@ -10072,6 +10073,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int link_id = nl80211_link_id(info->attrs);
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_chan_def chandef;
enum nl80211_dfs_regions dfs_region;
@@ -10121,7 +10123,20 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
goto unlock;
}
- if (cfg80211_beaconing_iface_active(wdev) || wdev->cac_started) {
+ if (cfg80211_beaconing_iface_active(wdev)) {
+ /* During MLO, other link(s) may already beacon; only the current
+ * link must not already be beaconing.
+ */
+ if (wdev->valid_links &&
+ !wdev->links[link_id].ap.beacon_interval) {
+ /* nothing */
+ } else {
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+
+ if (wdev->links[link_id].cac_started) {
err = -EBUSY;
goto unlock;
}
@@ -10141,7 +10156,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (WARN_ON(!cac_time_ms))
cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
- err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
+ err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms,
+ link_id);
if (!err) {
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
@@ -10157,9 +10173,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
default:
break;
}
- wdev->cac_started = true;
- wdev->cac_start_time = jiffies;
- wdev->cac_time_ms = cac_time_ms;
+ wdev->links[link_id].cac_started = true;
+ wdev->links[link_id].cac_start_time = jiffies;
+ wdev->links[link_id].cac_time_ms = cac_time_ms;
}
unlock:
wiphy_unlock(wiphy);
@@ -10507,17 +10523,21 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
NL80211_BSS_CHAIN_SIGNAL))
goto nla_put_failure;
- switch (rdev->wiphy.signal_type) {
- case CFG80211_SIGNAL_TYPE_MBM:
- if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
- goto nla_put_failure;
- break;
- case CFG80211_SIGNAL_TYPE_UNSPEC:
- if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
- goto nla_put_failure;
- break;
- default:
- break;
+ if (intbss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ case CFG80211_SIGNAL_TYPE_UNSPEC:
+ if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ default:
+ break;
+ }
}
switch (wdev->iftype) {
@@ -16511,10 +16531,10 @@ nl80211_set_ttlm(struct sk_buff *skb, struct genl_info *info)
SELECTOR(__sel, NETDEV_UP_NOTMX, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX) \
- SELECTOR(__sel, NETDEV_UP_NOTMX_NOMLO, \
+ SELECTOR(__sel, NETDEV_UP_NOTMX_MLO, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX | \
- NL80211_FLAG_MLO_UNSUPPORTED) \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
SELECTOR(__sel, NETDEV_UP_CLEAR, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_CLEAR_SKB) \
@@ -17409,7 +17429,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NO_WIPHY_MTX |
- NL80211_FLAG_MLO_UNSUPPORTED),
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index ec3f4aa1c807..f5adbf6b5c84 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1200,26 +1200,27 @@ static inline int
rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
int ret = -EOPNOTSUPP;
trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef,
- cac_time_ms);
+ cac_time_ms, link_id);
if (rdev->ops->start_radar_detection)
ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev,
- chandef, cac_time_ms);
+ chandef, cac_time_ms,
+ link_id);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline void
rdev_end_cac(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
- trace_rdev_end_cac(&rdev->wiphy, dev);
+ trace_rdev_end_cac(&rdev->wiphy, dev, link_id);
if (rdev->ops->end_cac)
- rdev->ops->end_cac(&rdev->wiphy, dev);
+ rdev->ops->end_cac(&rdev->wiphy, dev, link_id);
trace_rdev_return_void(&rdev->wiphy);
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4a27f3823e25..6489ba943a63 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -4229,6 +4229,8 @@ EXPORT_SYMBOL(regulatory_pre_cac_allowed);
static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev;
+ unsigned int link_id;
+
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* The check !cfg80211_chandef_dfs_usable contains 2 options:
@@ -4241,16 +4243,17 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
struct cfg80211_chan_def *chandef;
- if (!wdev->cac_started)
- continue;
+ for_each_valid_link(wdev, link_id) {
+ if (!wdev->links[link_id].cac_started)
+ continue;
- /* FIXME: radar detection is tied to link 0 for now */
- chandef = wdev_chandef(wdev, 0);
- if (!chandef)
- continue;
+ chandef = wdev_chandef(wdev, link_id);
+ if (!chandef)
+ continue;
- if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
- rdev_end_cac(rdev, wdev->netdev);
+ if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
+ rdev_end_cac(rdev, wdev->netdev, link_id);
+ }
}
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 64eeed82d43d..59a90bf3c0d6 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1910,6 +1910,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
known->pub.bssid_index = new->pub.bssid_index;
known->pub.use_for &= new->pub.use_for;
known->pub.cannot_use_reasons = new->pub.cannot_use_reasons;
+ known->bss_source = new->bss_source;
return true;
}
@@ -2008,10 +2009,10 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
return found;
free_ies:
- ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.beacon_ies);
if (ies)
kfree_rcu(ies, rcu_head);
- ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.proberesp_ies);
if (ies)
kfree_rcu(ies, rcu_head);
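
The two conversions above are the standard fix for fetching an RCU-protected pointer without dereferencing it: on this error path the object was never published, so there is no read-side critical section, and rcu_access_pointer() documents that only the pointer value is consumed. A small kernel-style sketch of the distinction (demo_* names hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_ies { struct rcu_head rcu_head; int len; };

	static struct demo_ies __rcu *demo_slot;

	static int demo_read_len(void)
	{
		struct demo_ies *ies;
		int len = 0;

		rcu_read_lock();
		ies = rcu_dereference(demo_slot);  /* will be dereferenced */
		if (ies)
			len = ies->len;
		rcu_read_unlock();
		return len;
	}

	static void demo_free_unpublished(struct demo_ies __rcu *tmp)
	{
		/* only the pointer value is needed, never the pointee */
		struct demo_ies *ies = rcu_access_pointer(tmp);

		if (ies)
			kfree_rcu(ies, rcu_head);
	}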
@@ -2149,11 +2150,7 @@ struct cfg80211_inform_single_bss_data {
const u8 *ie;
size_t ielen;
- enum {
- BSS_SOURCE_DIRECT = 0,
- BSS_SOURCE_MBSSID,
- BSS_SOURCE_STA_PROFILE,
- } bss_source;
+ enum bss_source_type bss_source;
/* Set if reporting bss_source != BSS_SOURCE_DIRECT */
struct cfg80211_bss *source_bss;
u8 max_bssid_indicator;
@@ -2268,6 +2265,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
+ tmp.bss_source = data->bss_source;
switch (data->bss_source) {
case BSS_SOURCE_MBSSID:
@@ -2907,6 +2905,9 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
+ const struct element *ssid_elem;
+ const u8 *ssid = NULL;
+ size_t ssid_len = 0;
u16 control;
u8 ml_common_len;
u8 *new_ie = NULL;
@@ -2961,6 +2962,13 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
bss_change_count,
gfp);
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, tx_data->ie,
+ tx_data->ielen);
+ if (ssid_elem) {
+ ssid = ssid_elem->data;
+ ssid_len = ssid_elem->datalen;
+ }
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
@@ -3042,6 +3050,23 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
freq = ieee80211_channel_to_freq_khz(ap_info->channel, band);
data.channel = ieee80211_get_channel_khz(wiphy, freq);
+ /* Skip if a BSS entry generated from an MBSSID or DIRECT
+ * source frame is already available.
+ */
+ bss = cfg80211_get_bss(wiphy, data.channel, data.bssid, ssid,
+ ssid_len, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+
+ if (data.capability == bss->capability &&
+ ibss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ cfg80211_put_bss(wiphy, bss);
+ continue;
+ }
+ cfg80211_put_bss(wiphy, bss);
+ }
+
if (use_for == NL80211_BSS_USE_FOR_MLD_LINK &&
!(wiphy->flags & WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY)) {
use_for = 0;
@@ -3467,8 +3492,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
n_channels = ieee80211_get_num_supported_channels(wiphy);
}
- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
- n_channels * sizeof(void *),
+ creq = kzalloc(struct_size(creq, channels, n_channels) +
+ sizeof(struct cfg80211_ssid),
GFP_ATOMIC);
if (!creq)
return -ENOMEM;
@@ -3476,7 +3501,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->wiphy = wiphy;
creq->wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
- creq->ssids = (void *)&creq->channels[n_channels];
+ creq->ssids = (void *)creq + struct_size(creq, channels, n_channels);
creq->n_channels = n_channels;
creq->n_ssids = 1;
creq->scan_start = jiffies;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d9d7bf8bb5c1..431da30817a6 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
n_channels = i;
}
request->n_channels = n_channels;
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = 1;
memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 5c26f065bd68..97c21b627791 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -805,9 +805,22 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
TP_ARGS(wiphy, netdev)
);
-DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
- TP_ARGS(wiphy, netdev)
+TRACE_EVENT(rdev_end_cac,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, netdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
);
DECLARE_EVENT_CLASS(station_add_change,
@@ -2652,24 +2665,26 @@ TRACE_EVENT(rdev_external_auth,
TRACE_EVENT(rdev_start_radar_detection,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms),
- TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ u32 cac_time_ms, int link_id),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms, link_id),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(u32, cac_time_ms)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->cac_time_ms = cac_time_ms;
+ __entry->link_id = link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", cac_time_ms=%u",
+ ", cac_time_ms=%u, link_id=%d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->cac_time_ms)
+ __entry->cac_time_ms, __entry->link_id)
);
TRACE_EVENT(rdev_set_mcast_rate,
@@ -3483,18 +3498,21 @@ TRACE_EVENT(cfg80211_radar_event,
);
TRACE_EVENT(cfg80211_cac_event,
- TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
- TP_ARGS(netdev, evt),
+ TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt,
+ unsigned int link_id),
+ TP_ARGS(netdev, evt, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
__field(enum nl80211_radar_event, evt)
+ __field(unsigned int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
__entry->evt = evt;
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", event: %d",
- NETDEV_PR_ARG, __entry->evt)
+ TP_printk(NETDEV_PR_FMT ", event: %d, link_id=%u",
+ NETDEV_PR_ARG, __entry->evt, __entry->link_id)
);
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 9a44d363ba62..f123b7c9ec82 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -328,12 +328,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
/* User explicitly requested packet offload mode and configured
* policy in addition to the XFRM state. So be civil to users,
* and return an error instead of taking fallback path.
- *
- * This WARN_ON() can be seen as a documentation for driver
- * authors to do not return -EOPNOTSUPP in packet offload mode.
*/
- WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
- if (err != -EOPNOTSUPP || is_packet_offload) {
+ if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) {
NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
return err;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b22767c0c078..914bac03b52a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -110,7 +110,11 @@ struct xfrm_pol_inexact_node {
* 4. saddr:any list from saddr tree
*
* This result set then needs to be searched for the policy with
- * the lowest priority. If two results have same prio, youngest one wins.
+ * the lowest priority. If two candidates have the same priority, the
+ * struct xfrm_policy pos member with the lower number is used.
+ *
+ * This replicates the previous single-list-search algorithm, which would
+ * return the first matching policy in the (ordered-by-priority) list.
*/
struct xfrm_pol_inexact_key {
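
A compact sketch of the selection rule the comment describes, over a flat candidate array (hypothetical demo_* types; the real lookup walks up to four lists):

	struct demo_pol {
		unsigned int priority;  /* lower wins */
		unsigned int pos;       /* insertion order, lower == older */
	};

	static const struct demo_pol *demo_pick(const struct demo_pol *cand,
						int n)
	{
		const struct demo_pol *best = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (!best || cand[i].priority < best->priority ||
			    (cand[i].priority == best->priority &&
			     cand[i].pos < best->pos))
				best = &cand[i];
		}
		return best;
	}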
@@ -197,8 +201,6 @@ xfrm_policy_inexact_lookup_rcu(struct net *net,
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
bool excl);
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy);
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
@@ -411,7 +413,6 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
if (policy) {
write_pnet(&policy->xp_net, net);
INIT_LIST_HEAD(&policy->walk.all);
- INIT_HLIST_NODE(&policy->bydst_inexact_list);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
@@ -1229,26 +1230,31 @@ xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
return ERR_PTR(-EEXIST);
}
- chain = &net->xfrm.policy_inexact[dir];
- xfrm_policy_insert_inexact_list(chain, policy);
-
if (delpol)
__xfrm_policy_inexact_prune_bin(bin, false);
return delpol;
}
+static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
+{
+ int dir;
+
+ if (policy->walk.dead)
+ return true;
+
+ dir = xfrm_policy_id2dir(policy->index);
+ return dir >= XFRM_POLICY_MAX;
+}
+
static void xfrm_hash_rebuild(struct work_struct *work)
{
struct net *net = container_of(work, struct net,
xfrm.policy_hthresh.work);
- unsigned int hmask;
struct xfrm_policy *pol;
struct xfrm_policy *policy;
struct hlist_head *chain;
- struct hlist_head *odst;
struct hlist_node *newpos;
- int i;
int dir;
unsigned seq;
u8 lbits4, rbits4, lbits6, rbits6;
@@ -1275,13 +1281,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
- if (policy->walk.dead)
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX)
- continue;
-
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
if (policy->family == AF_INET) {
dbits = rbits4;
@@ -1312,23 +1315,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
goto out_unlock;
}
- /* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- struct hlist_node *n;
-
- hlist_for_each_entry_safe(policy, n,
- &net->xfrm.policy_inexact[dir],
- bydst_inexact_list) {
- hlist_del_rcu(&policy->bydst);
- hlist_del_init(&policy->bydst_inexact_list);
- }
-
- hmask = net->xfrm.policy_bydst[dir].hmask;
- odst = net->xfrm.policy_bydst[dir].table;
- for (i = hmask; i >= 0; i--) {
- hlist_for_each_entry_safe(policy, n, odst + i, bydst)
- hlist_del_rcu(&policy->bydst);
- }
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
@@ -1346,14 +1333,13 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
- if (policy->walk.dead)
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
- dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX) {
- /* skip socket policies */
- continue;
- }
+
+ hlist_del_rcu(&policy->bydst);
+
newpos = NULL;
+ dir = xfrm_policy_id2dir(policy->index);
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
@@ -1520,42 +1506,6 @@ static const struct rhashtable_params xfrm_pol_inexact_params = {
.automatic_shrinking = true,
};
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy)
-{
- struct xfrm_policy *pol, *delpol = NULL;
- struct hlist_node *newpos = NULL;
- int i = 0;
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if (pol->type == policy->type &&
- pol->if_id == policy->if_id &&
- !selector_cmp(&pol->selector, &policy->selector) &&
- xfrm_policy_mark_match(&policy->mark, pol) &&
- xfrm_sec_ctx_match(pol->security, policy->security) &&
- !WARN_ON(delpol)) {
- delpol = pol;
- if (policy->priority > pol->priority)
- continue;
- } else if (policy->priority >= pol->priority) {
- newpos = &pol->bydst_inexact_list;
- continue;
- }
- if (delpol)
- break;
- }
-
- if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
- hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
- else
- hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- pol->pos = i;
- i++;
- }
-}
-
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
struct xfrm_policy *policy,
bool excl)
@@ -2295,10 +2245,52 @@ out:
return pol;
}
+static u32 xfrm_gen_pos_slow(struct net *net)
+{
+ struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* oldest entry is last in list */
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ if (!xfrm_policy_is_dead_or_sk(policy))
+ policy->pos = ++i;
+ }
+
+ return i;
+}
+
+static u32 xfrm_gen_pos(struct net *net)
+{
+ const struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* most recently added policy is at the head of the list */
+ list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
+ if (xfrm_policy_is_dead_or_sk(policy))
+ continue;
+
+ if (policy->pos == UINT_MAX)
+ return xfrm_gen_pos_slow(net);
+
+ i = policy->pos + 1;
+ break;
+ }
+
+ return i;
+}
+
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
+ switch (dir) {
+ case XFRM_POLICY_IN:
+ case XFRM_POLICY_FWD:
+ case XFRM_POLICY_OUT:
+ pol->pos = xfrm_gen_pos(net);
+ break;
+ }
+
list_add(&pol->walk.all, &net->xfrm.policy_all);
net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
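
xfrm_gen_pos() stays O(1) in the common case because the most recently linked policy sits at the head of net->xfrm.policy_all; the slow renumbering path only runs when pos would wrap. A userspace sketch of the same scheme over an array ordered newest-first (demo_* names hypothetical):

	#include <limits.h>

	struct demo_pol { unsigned int pos; int dead_or_sk; };

	/* oldest entry last: renumber back-to-front starting from 1 */
	static unsigned int demo_gen_pos_slow(struct demo_pol *p, int n)
	{
		unsigned int i = 0;
		int k;

		for (k = n - 1; k >= 0; k--)
			if (!p[k].dead_or_sk)
				p[k].pos = ++i;
		return i;
	}

	static unsigned int demo_gen_pos(struct demo_pol *p, int n)
	{
		int k;

		/* the newest live entry yields the next pos in O(1) */
		for (k = 0; k < n; k++) {
			if (p[k].dead_or_sk)
				continue;
			if (p[k].pos == UINT_MAX)
				return demo_gen_pos_slow(p, n);
			return p[k].pos + 1;
		}
		return 0;
	}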
@@ -2315,7 +2307,6 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
hlist_del_rcu(&pol->bydst);
- hlist_del_init(&pol->bydst_inexact_list);
hlist_del(&pol->byidx);
}
@@ -4438,63 +4429,50 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
- const struct xfrm_selector *sel_tgt)
-{
- if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
- if (sel_tgt->family == sel_cmp->family &&
- xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
- sel_cmp->family) &&
- xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
- sel_cmp->family) &&
- sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
- sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return true;
- }
- } else {
- if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return true;
- }
- }
- return false;
-}
-
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type, struct net *net, u32 if_id)
{
- struct xfrm_policy *pol, *ret = NULL;
- struct hlist_head *chain;
- u32 priority = ~0U;
+ struct xfrm_policy *pol;
+ struct flowi fl;
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
- hlist_for_each_entry(pol, chain, bydst) {
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
- priority = ret->priority;
- break;
- }
- }
- chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if ((pol->priority >= priority) && ret)
- break;
+ memset(&fl, 0, sizeof(fl));
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
+ fl.flowi_proto = sel->proto;
+
+ switch (sel->family) {
+ case AF_INET:
+ fl.u.ip4.saddr = sel->saddr.a4;
+ fl.u.ip4.daddr = sel->daddr.a4;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
break;
- }
+ fl.u.flowi4_oif = sel->ifindex;
+ fl.u.ip4.fl4_sport = sel->sport;
+ fl.u.ip4.fl4_dport = sel->dport;
+ break;
+ case AF_INET6:
+ fl.u.ip6.saddr = sel->saddr.in6;
+ fl.u.ip6.daddr = sel->daddr.in6;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
+ break;
+ fl.u.flowi6_oif = sel->ifindex;
+ fl.u.ip6.fl4_sport = sel->sport;
+ fl.u.ip6.fl4_dport = sel->dport;
+ break;
+ default:
+ return ERR_PTR(-EAFNOSUPPORT);
}
- xfrm_pol_hold(ret);
+ rcu_read_lock();
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
+ if (IS_ERR_OR_NULL(pol))
+ goto out_unlock;
- return ret;
+ if (!xfrm_pol_hold_rcu(pol))
+ pol = NULL;
+out_unlock:
+ rcu_read_unlock();
+ return pol;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
@@ -4631,9 +4609,9 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
/* Stage 1 - find policy */
pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
- if (!pol) {
+ if (IS_ERR_OR_NULL(pol)) {
NL_SET_ERR_MSG(extack, "Target policy not found");
- err = -ENOENT;
+ err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
goto out;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index a5f1c0c27dff..3b7df5477317 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -65,10 +65,11 @@ TARGETS += net/af_unix
TARGETS += net/forwarding
TARGETS += net/hsr
TARGETS += net/mptcp
-TARGETS += net/openvswitch
-TARGETS += net/tcp_ao
TARGETS += net/netfilter
+TARGETS += net/openvswitch
+TARGETS += net/packetdrill
TARGETS += net/rds
+TARGETS += net/tcp_ao
TARGETS += nsfs
TARGETS += perf_events
TARGETS += pidfd
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 74954f6a8f94..2c3c58e65a41 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -111,8 +111,11 @@ run_one()
stdbuf="/usr/bin/stdbuf --output=L "
fi
eval kselftest_cmd_args="\$${kselftest_cmd_args_ref:-}"
- cmd="$stdbuf ./$BASENAME_TEST $kselftest_cmd_args"
- if [ ! -x "$TEST" ]; then
+ if [ -x "$TEST" ]; then
+ cmd="$stdbuf ./$BASENAME_TEST $kselftest_cmd_args"
+ elif [ -x "./ksft_runner.sh" ]; then
+ cmd="$stdbuf ./ksft_runner.sh ./$BASENAME_TEST"
+ else
echo "# Warning: file $TEST is not executable"
if [ $(head -n 1 "$TEST" | cut -c -2) = "#!" ]
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 27362e40eb37..22a5d6a7c3f3 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -56,7 +56,7 @@ TEST_PROGS += ip_local_port_range.sh
TEST_PROGS += rps_default_mask.sh
TEST_PROGS += big_tcp.sh
TEST_PROGS += netns-sysctl.sh
-TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
+TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh xfrm_policy_add_speed.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
diff --git a/tools/testing/selftests/net/af_unix/msg_oob.c b/tools/testing/selftests/net/af_unix/msg_oob.c
index 535eb2c3d7d1..3ed3882a93b8 100644
--- a/tools/testing/selftests/net/af_unix/msg_oob.c
+++ b/tools/testing/selftests/net/af_unix/msg_oob.c
@@ -525,6 +525,29 @@ TEST_F(msg_oob, ex_oob_drop_2)
}
}
+TEST_F(msg_oob, ex_oob_oob)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("", -EAGAIN, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
TEST_F(msg_oob, ex_oob_ahead_break)
{
sendpair("hello", 5, MSG_OOB);
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 776d43a6922d..2bd0c1eb70c5 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -284,7 +284,7 @@ echo "b" | \
./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} -w 20 \
127.0.0.1 >/dev/null &
wait_connected $ns 10000
-chk_msk_nr 2 "after MPC handshake "
+chk_msk_nr 2 "after MPC handshake"
chk_last_time_info 10000
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index b77fb7065bfb..57325d57e4c6 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -345,9 +345,11 @@ do_transfer()
local addr_port
addr_port=$(printf "%s:%d" ${connect_addr} ${port})
- local result_msg
- result_msg="$(printf "%.3s %-5s -> %.3s (%-20s) %-5s" ${connector_ns} ${cl_proto} ${listener_ns} ${addr_port} ${srv_proto})"
- mptcp_lib_print_title "${result_msg}"
+ local pretty_title
+ pretty_title="$(printf "%.3s %-5s -> %.3s (%-20s) %-5s" ${connector_ns} ${cl_proto} ${listener_ns} ${addr_port} ${srv_proto})"
+ mptcp_lib_print_title "${pretty_title}"
+
+ local tap_title="${connector_ns:0:3} ${cl_proto} -> ${listener_ns:0:3} (${addr_port}) ${srv_proto}"
if $capture; then
local capuser
@@ -431,7 +433,6 @@ do_transfer()
local duration
duration=$((stop-start))
- result_msg+=" # time=${duration}ms"
printf "(duration %05sms) " "${duration}"
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
mptcp_lib_pr_fail "client exit code $retc, server $rets"
@@ -444,7 +445,7 @@ do_transfer()
echo
cat "$capout"
- mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
+ mptcp_lib_result_fail "${TEST_GROUP}: ${tap_title}"
return 1
fi
@@ -544,12 +545,12 @@ do_transfer()
if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
mptcp_lib_pr_ok "${extra:1}"
- mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}"
+ mptcp_lib_result_pass "${TEST_GROUP}: ${tap_title}"
else
if [ -n "${extra}" ]; then
mptcp_lib_print_warn "${extra:1}"
fi
- mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
+ mptcp_lib_result_fail "${TEST_GROUP}: ${tap_title}"
fi
cat "$capout"
@@ -848,6 +849,8 @@ stop_if_error()
make_file "$cin" "client"
make_file "$sin" "server"
+mptcp_lib_subtests_last_ts_reset
+
check_mptcp_disabled
stop_if_error "The kernel configuration is not valid for MPTCP"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 43f8a9bd84c4..3564cd06643c 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -3959,9 +3959,11 @@ if [ ${#tests[@]} -eq 0 ]; then
tests=("${all_tests_names[@]}")
fi
+mptcp_lib_subtests_last_ts_reset
for subtests in "${tests[@]}"; do
"${subtests}"
done
+append_prev_results
if [ ${ret} -ne 0 ]; then
echo
@@ -3972,7 +3974,6 @@ if [ ${ret} -ne 0 ]; then
echo
fi
-append_prev_results
mptcp_lib_result_print_all_tap
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 4578a331041e..975d4d4c862a 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -29,6 +29,7 @@ declare -rx MPTCP_LIB_AF_INET6=10
MPTCP_LIB_SUBTESTS=()
MPTCP_LIB_SUBTESTS_DUPLICATED=0
MPTCP_LIB_SUBTEST_FLAKY=0
+MPTCP_LIB_SUBTESTS_LAST_TS_MS=
MPTCP_LIB_TEST_COUNTER=0
MPTCP_LIB_TEST_FORMAT="%02u %-50s"
MPTCP_LIB_IP_MPTCP=0
@@ -205,6 +206,11 @@ mptcp_lib_kversion_ge() {
mptcp_lib_fail_if_expected_feature "kernel version ${1} lower than ${v}"
}
+mptcp_lib_subtests_last_ts_reset() {
+ MPTCP_LIB_SUBTESTS_LAST_TS_MS="$(date +%s%3N)"
+}
+mptcp_lib_subtests_last_ts_reset
+
__mptcp_lib_result_check_duplicated() {
local subtest
@@ -219,13 +225,22 @@ __mptcp_lib_result_check_duplicated() {
__mptcp_lib_result_add() {
local result="${1}"
+ local time="time="
+ local ts_prev_ms
shift
local id=$((${#MPTCP_LIB_SUBTESTS[@]} + 1))
__mptcp_lib_result_check_duplicated "${*}"
- MPTCP_LIB_SUBTESTS+=("${result} ${id} - ${KSFT_TEST}: ${*}")
+ # avoid adding a second '#'
+ [[ "${*}" != *"#"* ]] && time="# ${time}"
+
+ ts_prev_ms="${MPTCP_LIB_SUBTESTS_LAST_TS_MS}"
+ mptcp_lib_subtests_last_ts_reset
+ time+="$((MPTCP_LIB_SUBTESTS_LAST_TS_MS - ts_prev_ms))ms"
+
+ MPTCP_LIB_SUBTESTS+=("${result} ${id} - ${KSFT_TEST}: ${*} ${time}")
}
# $1: test name
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 68899a303a1a..5e8d5b83e2d0 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -349,6 +349,7 @@ init
make_file "$cin" "client" 1
make_file "$sin" "server" 1
trap cleanup EXIT
+mptcp_lib_subtests_last_ts_reset
run_tests $ns1 $ns2 10.0.1.1
run_tests $ns1 $ns2 dead:beef:1::1
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 2757378b1b13..2e6648a2b2c0 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -137,6 +137,8 @@ check()
fi
}
+mptcp_lib_subtests_last_ts_reset
+
check "show_endpoints" "" "defaults addr list"
default_limits="$(get_limits)"
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index f74e1c3c126d..8fa77c8e9b65 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -286,6 +286,7 @@ while getopts "bcdhi" option;do
done
setup
+mptcp_lib_subtests_last_ts_reset
run_test 10 10 0 0 "balanced bwidth"
run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 9cb05978269d..3651f73451cf 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -150,6 +150,7 @@ mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid
server_evts=$(mktemp)
mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid
sleep 0.5
+mptcp_lib_subtests_last_ts_reset
print_title "Init"
print_test "Created network namespaces ns1, ns2"
diff --git a/tools/testing/selftests/net/packetdrill/Makefile b/tools/testing/selftests/net/packetdrill/Makefile
new file mode 100644
index 000000000000..870f7258dc8d
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_INCLUDES := ksft_runner.sh \
+ defaults.sh \
+ ../../kselftest/ktap_helpers.sh
+
+TEST_PROGS := $(wildcard *.pkt)
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/packetdrill/config b/tools/testing/selftests/net/packetdrill/config
new file mode 100644
index 000000000000..0d402830f18d
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/config
@@ -0,0 +1,5 @@
+CONFIG_IPV6=y
+CONFIG_NET_SCH_FIFO=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_TUN=y
diff --git a/tools/testing/selftests/net/packetdrill/defaults.sh b/tools/testing/selftests/net/packetdrill/defaults.sh
new file mode 100755
index 000000000000..1095a7b22f44
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/defaults.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Set standard production config values that relate to TCP behavior.
+
+# Flush old cached data (fastopen cookies).
+ip tcp_metrics flush all > /dev/null 2>&1
+
+# TCP min, default, and max receive and send buffer sizes.
+sysctl -q net.ipv4.tcp_rmem="4096 540000 $((15*1024*1024))"
+sysctl -q net.ipv4.tcp_wmem="4096 $((256*1024)) 4194304"
+
+# TCP timestamps.
+sysctl -q net.ipv4.tcp_timestamps=1
+
+# TCP SYN(ACK) retry thresholds
+sysctl -q net.ipv4.tcp_syn_retries=5
+sysctl -q net.ipv4.tcp_synack_retries=5
+
+# TCP Forward RTO-Recovery, RFC 5682.
+sysctl -q net.ipv4.tcp_frto=2
+
+# TCP Selective Acknowledgements (SACK)
+sysctl -q net.ipv4.tcp_sack=1
+
+# TCP Duplicate Selective Acknowledgements (DSACK)
+sysctl -q net.ipv4.tcp_dsack=1
+
+# TCP FACK (Forward Acknowledgement)
+sysctl -q net.ipv4.tcp_fack=0
+
+# TCP reordering degree ("dupthresh" threshold for entering Fast Recovery).
+sysctl -q net.ipv4.tcp_reordering=3
+
+# TCP congestion control.
+sysctl -q net.ipv4.tcp_congestion_control=cubic
+
+# TCP slow start after idle.
+sysctl -q net.ipv4.tcp_slow_start_after_idle=0
+
+# TCP RACK and TLP.
+sysctl -q net.ipv4.tcp_early_retrans=4 net.ipv4.tcp_recovery=1
+
+# TCP method for deciding when to defer sending to accumulate big TSO packets.
+sysctl -q net.ipv4.tcp_tso_win_divisor=3
+
+# TCP Explicit Congestion Notification (ECN)
+sysctl -q net.ipv4.tcp_ecn=0
+
+sysctl -q net.ipv4.tcp_pacing_ss_ratio=200
+sysctl -q net.ipv4.tcp_pacing_ca_ratio=120
+sysctl -q net.ipv4.tcp_notsent_lowat=4294967295 > /dev/null 2>&1
+
+sysctl -q net.ipv4.tcp_fastopen=0x70403
+sysctl -q net.ipv4.tcp_fastopen_key=a1a1a1a1-b2b2b2b2-c3c3c3c3-d4d4d4d4
+
+sysctl -q net.ipv4.tcp_syncookies=1
+
+# Override the default qdisc on the tun device.
+# Many tests fail with timing errors if the default
+# qdisc is FQ, which paces their flows.
+tc qdisc add dev tun0 root pfifo
+
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
new file mode 100755
index 000000000000..2f62caccbbbc
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source "$(dirname $(realpath $0))/../../kselftest/ktap_helpers.sh"
+
+readonly ipv4_args=('--ip_version=ipv4 '
+ '--local_ip=192.168.0.1 '
+ '--gateway_ip=192.168.0.1 '
+ '--netmask_ip=255.255.0.0 '
+ '--remote_ip=192.0.2.1 '
+ '-D CMSG_LEVEL_IP=SOL_IP '
+ '-D CMSG_TYPE_RECVERR=IP_RECVERR ')
+
+readonly ipv6_args=('--ip_version=ipv6 '
+ '--mtu=1520 '
+ '--local_ip=fd3d:0a0b:17d6::1 '
+ '--gateway_ip=fd3d:0a0b:17d6:8888::1 '
+ '--remote_ip=fd3d:fa7b:d17d::1 '
+ '-D CMSG_LEVEL_IP=SOL_IPV6 '
+ '-D CMSG_TYPE_RECVERR=IPV6_RECVERR ')
+
+if [ $# -ne 1 ]; then
+ ktap_exit_fail_msg "usage: $0 <script>"
+ exit "$KSFT_FAIL"
+fi
+script="$1"
+
+if [ -z "$(which packetdrill)" ]; then
+ ktap_skip_all "packetdrill not found in PATH"
+ exit "$KSFT_SKIP"
+fi
+
+ktap_print_header
+ktap_set_plan 2
+
+packetdrill ${ipv4_args[@]} $(basename $script) > /dev/null \
+ && ktap_test_pass "ipv4" || ktap_test_fail "ipv4"
+packetdrill ${ipv6_args[@]} $(basename $script) > /dev/null \
+ && ktap_test_pass "ipv6" || ktap_test_fail "ipv6"
+
+ktap_finished
diff --git a/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt b/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt
new file mode 100644
index 000000000000..df49c67645ac
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test TCP_INQ and TCP_CM_INQ on the client side.
+`./defaults.sh
+`
+
+// Create a socket and set it to non-blocking.
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 fcntl(3, F_GETFL) = 0x2 (flags O_RDWR)
+ +0 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+
+// Connect to the server and enable TCP_INQ.
+ +0 connect(3, ..., ...) = -1 EINPROGRESS (Operation now in progress)
+ +0 setsockopt(3, SOL_TCP, TCP_INQ, [1], 4) = 0
+
+ +0 > S 0:0(0) <mss 1460,sackOK,TS val 100 ecr 0,nop,wscale 8>
+ +.01 < S. 0:0(0) ack 1 win 5792 <mss 1460,sackOK,TS val 700 ecr 100,nop,wscale 7>
+ +0 > . 1:1(0) ack 1 <nop,nop,TS val 200 ecr 700>
+
+// Now we have 10K of data ready on the socket.
+ +0 < . 1:10001(10000) ack 1 win 514
+ +0 > . 1:1(0) ack 10001 <nop,nop,TS val 200 ecr 700>
+
+// We read 1K and we should have 9K ready to read.
+ +0 recvmsg(3, {msg_name(...)=...,
+ msg_iov(1)=[{..., 1000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=9000}]}, 0) = 1000
+// We read 9K and we should have no further data ready to read.
+ +0 recvmsg(3, {msg_name(...)=...,
+ msg_iov(1)=[{..., 9000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=0}]}, 0) = 9000
+
+// Server sends more data and closes the connections.
+ +0 < F. 10001:20001(10000) ack 1 win 514
+ +0 > . 1:1(0) ack 20002 <nop,nop,TS val 200 ecr 700>
+
+// We read 10K and we should have one "fake" byte because the connection is
+// closed.
+ +0 recvmsg(3, {msg_name(...)=...,
+ msg_iov(1)=[{..., 10000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=1}]}, 0) = 10000
+// Now, receive EOF.
+ +0 read(3, ..., 2000) = 0
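
These scripts exercise TCP_INQ from the wire side; the cmsg_data values they assert are what a receiver reads out of the TCP_CM_INQ control message. A hedged userspace sketch of that receive path (with a fallback define for older libc headers, as the in-tree tcp_inq selftest also carries):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	#ifndef TCP_INQ
	#define TCP_INQ 36
	#define TCP_CM_INQ TCP_INQ
	#endif

	/* caller enabled the option first:
	 * setsockopt(fd, IPPROTO_TCP, TCP_INQ, &(int){1}, sizeof(int)); */
	static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
	{
		char cbuf[CMSG_SPACE(sizeof(int))];
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = cbuf,
			.msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cm;
		ssize_t n;

		*inq = -1;
		n = recvmsg(fd, &msg, 0);
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == IPPROTO_TCP &&
			    cm->cmsg_type == TCP_CM_INQ)
				memcpy(inq, CMSG_DATA(cm), sizeof(*inq));
		return n;
	}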
diff --git a/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt b/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt
new file mode 100644
index 000000000000..04a5e2590c62
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test TCP_INQ and TCP_CM_INQ on the server side.
+`./defaults.sh
+`
+
+// Initialize connection
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 10>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.01 < . 1:1(0) ack 1 win 514
+
+// Accept the connection and enable TCP_INQ.
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_TCP, TCP_INQ, [1], 4) = 0
+
+// Now we have 10K of data ready on the socket.
+ +0 < . 1:10001(10000) ack 1 win 514
+ +0 > . 1:1(0) ack 10001
+
+// We read 2K and we should have 8K ready to read.
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{..., 2000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=8000}]}, 0) = 2000
+// We read 8K and we should have no further data ready to read.
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{..., 8000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=0}]}, 0) = 8000
+// Client sends more data and closes the connections.
+ +0 < F. 10001:20001(10000) ack 1 win 514
+ +0 > . 1:1(0) ack 20002
+
+// We read 10K and we should have one "fake" byte because the connection is
+// closed.
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{..., 10000}],
+ msg_flags=0,
+ msg_control=[{cmsg_level=SOL_TCP,
+ cmsg_type=TCP_CM_INQ,
+ cmsg_data=1}]}, 0) = 10000
+// Now, receive error.
+ +0 read(3, ..., 2000) = -1 ENOTCONN (Transport endpoint is not connected)
diff --git a/tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt b/tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt
new file mode 100644
index 000000000000..25dfef95d3f8
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test what happens when client does not provide MD5 on SYN,
+// but then does on the ACK that completes the three-way handshake.
+
+`./defaults.sh`
+
+// Establish a connection.
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 10>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+// Ooh, weird: client provides MD5 option on the ACK:
+ +.01 < . 1:1(0) ack 1 win 514 <md5 000102030405060708090a0b0c0d0e0f,nop,nop>
+ +.01 < . 1:1(0) ack 1 win 514 <md5 000102030405060708090a0b0c0d0e0f,nop,nop>
+
+// The TCP listener refcount should be 2, but on buggy kernels it can be 0:
+ +0 `grep " 0A " /proc/net/tcp /proc/net/tcp6 | grep ":1F90"`
+
+// Now here comes the legit ACK:
+ +.01 < . 1:1(0) ack 1 win 514
+
+// Make sure the connection is OK:
+ +0 accept(3, ..., ...) = 4
+
+ +.01 write(4, ..., 1000) = 1000
diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 9eb42570294d..16ac4df55fdb 100644
--- a/tools/testing/selftests/net/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
@@ -57,6 +57,8 @@ static struct sof_flag sof_flags[] = {
SOF_FLAG(SOF_TIMESTAMPING_SOFTWARE),
SOF_FLAG(SOF_TIMESTAMPING_RX_SOFTWARE),
SOF_FLAG(SOF_TIMESTAMPING_RX_HARDWARE),
+ SOF_FLAG(SOF_TIMESTAMPING_OPT_RX_FILTER),
+ SOF_FLAG(SOF_TIMESTAMPING_RAW_HARDWARE),
};
static struct socket_type socket_types[] = {
@@ -98,6 +100,22 @@ static struct test_case test_cases[] = {
{}
},
{
+ { .so_timestamping = SOF_TIMESTAMPING_RAW_HARDWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ {}
+ },
+ {
+ { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ {}
+ },
+ {
+ { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
+ | SOF_TIMESTAMPING_RX_SOFTWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ { .swtstamp = true }
+ },
+ {
{ .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
| SOF_TIMESTAMPING_RX_SOFTWARE },
{ .swtstamp = true }
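
The new cases cover SOF_TIMESTAMPING_OPT_RX_FILTER, which this series adds so that receive timestamps are only reported when the matching generation flag is also set (note the middle new case expects no swtstamp). In application terms the third new case corresponds to roughly this sketch (the flag needs the uapi header update from this series; the fallback value mirrors it):

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	#ifndef SOF_TIMESTAMPING_OPT_RX_FILTER
	#define SOF_TIMESTAMPING_OPT_RX_FILTER (1 << 17)
	#endif

	static int enable_sw_rx_tstamps(int fd)
	{
		int val = SOF_TIMESTAMPING_SOFTWARE |
			  SOF_TIMESTAMPING_RX_SOFTWARE |
			  SOF_TIMESTAMPING_OPT_RX_FILTER;

		/* with OPT_RX_FILTER, a software report is delivered only
		 * because RX_SOFTWARE is also set; dropping RX_SOFTWARE
		 * would suppress it, as the second new case asserts */
		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				  &val, sizeof(val));
	}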
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index ec60a16c9307..d626f22f9550 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -356,8 +356,12 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
}
}
- if (batch > 1)
+ if (batch > 1) {
fprintf(stderr, "batched %d timestamps\n", batch);
+ } else if (!batch) {
+ fprintf(stderr, "Failed to report timestamps\n");
+ test_failed = true;
+ }
}
static int recv_errmsg(int fd)
diff --git a/tools/testing/selftests/net/xfrm_policy_add_speed.sh b/tools/testing/selftests/net/xfrm_policy_add_speed.sh
new file mode 100755
index 000000000000..2fab29d3cb91
--- /dev/null
+++ b/tools/testing/selftests/net/xfrm_policy_add_speed.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+source lib.sh
+
+timeout=4m
+ret=0
+tmp=$(mktemp)
+cleanup() {
+ cleanup_all_ns
+ rm -f "$tmp"
+}
+
+trap cleanup EXIT
+
+maxpolicies=100000
+[ "$KSFT_MACHINE_SLOW" = "yes" ] && maxpolicies=10000
+
+do_dummies4() {
+ local dir="$1"
+ local max="$2"
+
+ local policies
+ local pfx
+ pfx=30
+ policies=0
+
+ ip netns exec "$ns" ip xfrm policy flush
+
+ for i in $(seq 1 100);do
+ local s
+ local d
+ for j in $(seq 1 255);do
+ s=$((i+0))
+ d=$((i+100))
+
+ for a in $(seq 1 8 255); do
+ policies=$((policies+1))
+ [ "$policies" -gt "$max" ] && return
+ echo xfrm policy add src 10.$s.$j.0/30 dst 10.$d.$j.$a/$pfx dir $dir action block
+ done
+ for a in $(seq 1 8 255); do
+ policies=$((policies+1))
+ [ "$policies" -gt "$max" ] && return
+ echo xfrm policy add src 10.$s.$j.$a/30 dst 10.$d.$j.0/$pfx dir $dir action block
+ done
+ done
+ done
+}
+
+setup_ns ns
+
+do_bench()
+{
+ local max="$1"
+
+ start=$(date +%s%3N)
+ do_dummies4 "out" "$max" > "$tmp"
+ if ! timeout "$timeout" ip netns exec "$ns" ip -batch "$tmp";then
+ echo "WARNING: policy insertion cancelled after $timeout"
+ ret=1
+ fi
+ stop=$(date +%s%3N)
+
+ result=$((stop-start))
+
+ policies=$(wc -l < "$tmp")
+ printf "Inserted %-06s policies in $result ms\n" $policies
+
+ have=$(ip netns exec "$ns" ip xfrm policy show | grep "action block" | wc -l)
+ if [ "$have" -ne "$policies" ]; then
+ echo "WARNING: mismatch, have $have policies, expected $policies"
+ ret=1
+ fi
+}
+
+p=100
+while [ $p -le "$maxpolicies" ]; do
+ do_bench "$p"
+ p="${p}0"
+done
+
+exit $ret